Diffstat (file mode, path, lines changed):

-rw-r--r--  .mailmap | 4
-rw-r--r--  Documentation/ABI/testing/sysfs-devices-system-cpu | 2
-rw-r--r--  Documentation/admin-guide/hw-vuln/index.rst | 2
-rw-r--r--  Documentation/admin-guide/hw-vuln/multihit.rst | 163
-rw-r--r--  Documentation/admin-guide/hw-vuln/tsx_async_abort.rst | 276
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 92
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,bcmgenet.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/broadcom-bluetooth.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/ethernet-controller.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/net/ethernet-phy.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/net/qca,ar803x.yaml | 111
-rw-r--r--  Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml | 240
-rw-r--r--  Documentation/devicetree/bindings/net/ti,dp83869.yaml | 84
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt | 6
-rw-r--r--  Documentation/networking/device_drivers/aquantia/atlantic.txt | 46
-rw-r--r--  Documentation/networking/device_drivers/freescale/dpaa.txt | 12
-rw-r--r--  Documentation/networking/device_drivers/mellanox/mlx5.rst | 21
-rw-r--r--  Documentation/networking/device_drivers/ti/cpsw_switchdev.txt | 209
-rw-r--r--  Documentation/networking/devlink-params-mlx5.txt | 17
-rw-r--r--  Documentation/networking/devlink-params-ti-cpsw-switch.txt | 10
-rw-r--r--  Documentation/networking/devlink-params.txt | 4
-rw-r--r--  Documentation/networking/devlink-trap.rst | 61
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 34
-rw-r--r--  Documentation/networking/tls-offload.rst | 4
-rw-r--r--  Documentation/x86/index.rst | 1
-rw-r--r--  Documentation/x86/tsx_async_abort.rst | 117
-rw-r--r--  MAINTAINERS | 23
-rw-r--r--  Makefile | 5
-rw-r--r--  arch/arm/boot/dts/am571x-idk.dts | 27
-rw-r--r--  arch/arm/boot/dts/am572x-idk.dts | 5
-rw-r--r--  arch/arm/boot/dts/am574x-idk.dts | 5
-rw-r--r--  arch/arm/boot/dts/am57xx-idk-common.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/dra7-l4.dtsi | 52
-rw-r--r--  arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-sabreauto.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/stm32mp157c-ev1.dts | 13
-rw-r--r--  arch/arm/boot/dts/stm32mp157c.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts | 1
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 1
-rw-r--r--  arch/arm/mach-pxa/icontrol.c | 9
-rw-r--r--  arch/arm/mach-pxa/zeus.c | 9
-rw-r--r--  arch/arm/mach-sunxi/mc_smp.c | 6
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mn.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi | 2
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 17
-rw-r--r--  arch/arm64/include/asm/vdso/vsyscall.h | 7
-rw-r--r--  arch/mips/include/asm/vdso/vsyscall.h | 7
-rw-r--r--  arch/mips/sgi-ip27/Kconfig | 7
-rw-r--r--  arch/mips/sgi-ip27/ip27-init.c | 21
-rw-r--r--  arch/mips/sgi-ip27/ip27-memory.c | 4
-rw-r--r--  arch/powerpc/include/asm/book3s/32/kup.h | 1
-rw-r--r--  arch/powerpc/include/asm/elf.h | 3
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 13
-rw-r--r--  arch/powerpc/kernel/prom_init_check.sh | 3
-rw-r--r--  arch/powerpc/net/bpf_jit_comp64.c | 13
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-powernv.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 53
-rw-r--r--  arch/s390/include/asm/unwind.h | 1
-rw-r--r--  arch/s390/kernel/idle.c | 29
-rw-r--r--  arch/s390/kernel/unwind_bc.c | 18
-rw-r--r--  arch/s390/mm/cmm.c | 12
-rw-r--r--  arch/sparc/vdso/Makefile | 4
-rw-r--r--  arch/x86/Kconfig | 45
-rw-r--r--  arch/x86/include/asm/cpufeatures.h | 2
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 6
-rw-r--r--  arch/x86/include/asm/msr-index.h | 16
-rw-r--r--  arch/x86/include/asm/nospec-branch.h | 4
-rw-r--r--  arch/x86/include/asm/processor.h | 7
-rw-r--r--  arch/x86/kernel/apic/apic.c | 28
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 2
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 159
-rw-r--r--  arch/x86/kernel/cpu/common.c | 99
-rw-r--r--  arch/x86/kernel/cpu/cpu.h | 18
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 5
-rw-r--r--  arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 4
-rw-r--r--  arch/x86/kernel/cpu/resctrl/rdtgroup.c | 4
-rw-r--r--  arch/x86/kernel/cpu/tsx.c | 140
-rw-r--r--  arch/x86/kernel/dumpstack_64.c | 7
-rw-r--r--  arch/x86/kernel/early-quirks.c | 2
-rw-r--r--  arch/x86/kernel/tsc.c | 3
-rw-r--r--  arch/x86/kvm/mmu.c | 282
-rw-r--r--  arch/x86/kvm/mmu.h | 4
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 29
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 23
-rw-r--r--  arch/x86/kvm/vmx/vmx.h | 11
-rw-r--r--  arch/x86/kvm/x86.c | 99
-rw-r--r--  block/bfq-iosched.c | 32
-rw-r--r--  block/bio.c | 2
-rw-r--r--  block/blk-cgroup.c | 13
-rw-r--r--  block/blk-iocost.c | 8
-rw-r--r--  drivers/base/cpu.c | 17
-rw-r--r--  drivers/base/memory.c | 36
-rw-r--r--  drivers/bcma/driver_chipcommon_pmu.c | 24
-rw-r--r--  drivers/block/drbd/drbd_main.c | 1
-rw-r--r--  drivers/block/rbd.c | 2
-rw-r--r--  drivers/block/rsxx/core.c | 2
-rw-r--r--  drivers/bluetooth/btmtksdio.c | 1
-rw-r--r--  drivers/bluetooth/btqca.c | 92
-rw-r--r--  drivers/bluetooth/btqca.h | 32
-rw-r--r--  drivers/bluetooth/btrtl.c | 2
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 1
-rw-r--r--  drivers/bluetooth/hci_bcsp.c | 3
-rw-r--r--  drivers/bluetooth/hci_qca.c | 143
-rw-r--r--  drivers/char/hw_random/core.c | 5
-rw-r--r--  drivers/char/random.c | 4
-rw-r--r--  drivers/clk/at91/clk-main.c | 5
-rw-r--r--  drivers/clk/at91/sam9x60.c | 1
-rw-r--r--  drivers/clk/at91/sckc.c | 20
-rw-r--r--  drivers/clk/clk-ast2600.c | 7
-rw-r--r--  drivers/clk/imx/clk-imx8mm.c | 2
-rw-r--r--  drivers/clk/imx/clk-imx8mn.c | 2
-rw-r--r--  drivers/clk/meson/g12a.c | 13
-rw-r--r--  drivers/clk/meson/gxbb.c | 1
-rw-r--r--  drivers/clk/samsung/clk-exynos5420.c | 27
-rw-r--r--  drivers/clk/samsung/clk-exynos5433.c | 14
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun9i-a80.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c | 4
-rw-r--r--  drivers/clk/ti/clk-dra7-atl.c | 6
-rw-r--r--  drivers/clk/ti/clkctrl.c | 5
-rw-r--r--  drivers/clocksource/sh_mtu2.c | 16
-rw-r--r--  drivers/clocksource/timer-mediatek.c | 10
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 4
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_io.c | 10
-rw-r--r--  drivers/firewire/net.c | 6
-rw-r--r--  drivers/gpio/gpio-merrifield.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 15
-rw-r--r--  drivers/gpu/drm/drm_self_refresh_helper.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 111
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 435
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_getparam.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 122
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 2
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-core.c | 4
-rw-r--r--  drivers/hid/wacom.h | 15
-rw-r--r--  drivers/hid/wacom_wac.c | 10
-rw-r--r--  drivers/hwmon/ina3221.c | 2
-rw-r--r--  drivers/hwmon/nct7904.c | 15
-rw-r--r--  drivers/hwtracing/intel_th/gth.c | 3
-rw-r--r--  drivers/hwtracing/intel_th/msu.c | 11
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 10
-rw-r--r--  drivers/iio/adc/stm32-adc.c | 4
-rw-r--r--  drivers/iio/imu/adis16480.c | 5
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 9
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h | 2
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c | 15
-rw-r--r--  drivers/iio/proximity/srf04.c | 29
-rw-r--r--  drivers/infiniband/hw/hfi1/init.c | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/pcie.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/rc.c | 16
-rw-r--r--  drivers/infiniband/hw/hfi1/tid_rdma.c | 57
-rw-r--r--  drivers/infiniband/hw/hfi1/tid_rdma.h | 3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.h | 2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_srq.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_rep.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_rep.h | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 41
-rw-r--r--  drivers/input/ff-memless.c | 9
-rw-r--r--  drivers/input/mouse/synaptics.c | 1
-rw-r--r--  drivers/input/rmi4/rmi_f11.c | 9
-rw-r--r--  drivers/input/rmi4/rmi_f12.c | 32
-rw-r--r--  drivers/input/rmi4/rmi_f54.c | 5
-rw-r--r--  drivers/input/touchscreen/cyttsp4_core.c | 7
-rw-r--r--  drivers/interconnect/core.c | 4
-rw-r--r--  drivers/interconnect/qcom/qcs404.c | 3
-rw-r--r--  drivers/interconnect/qcom/sdm845.c | 3
-rw-r--r--  drivers/isdn/hardware/mISDN/Kconfig | 2
-rw-r--r--  drivers/misc/vmw_vmci/vmci_driver.c | 67
-rw-r--r--  drivers/misc/vmw_vmci/vmci_driver.h | 2
-rw-r--r--  drivers/misc/vmw_vmci/vmci_guest.c | 2
-rw-r--r--  drivers/misc/vmw_vmci/vmci_host.c | 7
-rw-r--r--  drivers/mmc/host/sdhci-of-at91.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 127
-rw-r--r--  drivers/net/can/c_can/c_can.c | 71
-rw-r--r--  drivers/net/can/c_can/c_can.h | 1
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 21
-rw-r--r--  drivers/net/can/dev.c | 6
-rw-r--r--  drivers/net/can/flexcan.c | 142
-rw-r--r--  drivers/net/can/grcan.c | 4
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c | 4
-rw-r--r--  drivers/net/can/m_can/m_can.c | 54
-rw-r--r--  drivers/net/can/peak_canfd/peak_canfd.c | 25
-rw-r--r--  drivers/net/can/peak_canfd/peak_canfd_user.h | 3
-rw-r--r--  drivers/net/can/peak_canfd/peak_pciefd_main.c | 6
-rw-r--r--  drivers/net/can/rcar/rcar_can.c | 4
-rw-r--r--  drivers/net/can/rcar/rcar_canfd.c | 4
-rw-r--r--  drivers/net/can/rx-offload.c | 140
-rw-r--r--  drivers/net/can/slcan.c | 1
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 77
-rw-r--r--  drivers/net/can/sun4i_can.c | 4
-rw-r--r--  drivers/net/can/ti_hecc.c | 252
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 1
-rw-r--r--  drivers/net/can/usb/mcba_usb.c | 3
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 32
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 2
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 3
-rw-r--r--  drivers/net/can/xilinx_can.c | 103
-rw-r--r--  drivers/net/dsa/Kconfig | 2
-rw-r--r--  drivers/net/dsa/Makefile | 1
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 30
-rw-r--r--  drivers/net/dsa/bcm_sf2.h | 3
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 7
-rw-r--r--  drivers/net/dsa/mt7530.c | 8
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 300
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 21
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.c | 60
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 9
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_atu.c | 5
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.c | 13
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.h | 25
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 37
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.h | 3
-rw-r--r--  drivers/net/dsa/mv88e6xxx/ptp.c | 13
-rw-r--r--  drivers/net/dsa/ocelot/Kconfig | 11
-rw-r--r--  drivers/net/dsa/ocelot/Makefile | 6
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 441
-rw-r--r--  drivers/net/dsa/ocelot/felix.h | 37
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 567
-rw-r--r--  drivers/net/dsa/qca8k.c | 9
-rw-r--r--  drivers/net/dsa/sja1105/Kconfig | 1
-rw-r--r--  drivers/net/dsa/sja1105/sja1105.h | 34
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 67
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.c | 178
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.h | 48
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_spi.c | 180
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_tas.c | 432
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_tas.h | 27
-rw-r--r--  drivers/net/dummy.c | 36
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/Makefile | 7
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 9
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 235
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 24
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 15
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 232
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 28
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 95
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 7
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 43
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 112
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 26
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 11
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 54
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 207
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 228
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 253
-rw-r--r--  drivers/net/ethernet/arc/emac_arc.c | 15
-rw-r--r--  drivers/net/ethernet/arc/emac_rockchip.c | 7
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 5
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.c | 4
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 132
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 351
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 47
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 30
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 95
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 109
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 192
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 9
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 491
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/Makefile | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 38
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 120
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 53
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 120
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 787
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 52
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 354
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h | 49
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c | 650
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h | 43
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 36
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 131
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 39
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c | 265
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.h | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 812
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 38
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c | 5
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 1
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 13
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 90
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 6
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h | 11
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpmac.c | 34
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpmac.h | 82
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Makefile | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 37
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 15
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_cbdr.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 27
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 89
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 172
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 22
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 19
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 34
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_qmr.c | 5
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 5
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.h | 2
-rw-r--r--  drivers/net/ethernet/ibm/emac/zmii.c | 3
-rw-r--r--  drivers/net/ethernet/ibm/emac/zmii.h | 3
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 26
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k.h | 3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 48
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 1
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_tlv.h | 6
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 48
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 10
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 65
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 79
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 857
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.h | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 172
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 65
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 212
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 24
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 933
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_nl.h | 19
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 231
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 1058
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 53
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 541
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 51
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 1266
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.h | 39
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 600
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 140
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 273
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 59
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 68
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 99
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 1181
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.h | 72
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 9
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 17
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 35
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/Makefile | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 60
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h | 20
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.c | 87
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 32
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 99
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h | 14918
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 116
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 221
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 129
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 1711
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 876
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c | 55
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 187
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 32
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 40
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 4
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 52
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 575
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 87
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 271
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 129
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 176
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/emad.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 82
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 100
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 15
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ptp.c | 303
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ptp.h | 27
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 1064
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 476
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ace.h | 4
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_board.c | 117
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_flower.c | 32
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_io.c | 14
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_police.c | 36
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_police.h | 4
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_regs.c | 3
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_tc.c | 56
-rw-r--r--  drivers/net/ethernet/ni/nixge.c | 5
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 59
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 12
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 6
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169_firmware.c | 19
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 929
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 3
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 30
-rw-r--r--  drivers/net/ethernet/renesas/ravb_ptp.c | 11
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 7
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 3
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 62
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 31
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 21
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 33
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 18
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 307
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 58
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 134
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 2
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 20
-rw-r--r--  drivers/net/ethernet/ti/Makefile | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 1285
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 150
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h | 11
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 2048
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.c | 1246
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.h | 81
-rw-r--r--  drivers/net/ethernet/ti/cpsw_switchdev.c | 589
-rw-r--r--  drivers/net/ethernet/ti/cpsw_switchdev.h | 15
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 2
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 5
-rw-r--r--  drivers/net/ethernet/xilinx/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 10
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 1
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 11
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 8
-rw-r--r--  drivers/net/ieee802154/cc2520.c | 3
-rw-r--r--  drivers/net/loopback.c | 38
-rw-r--r--  drivers/net/netdevsim/dev.c | 50
-rw-r--r--  drivers/net/netdevsim/health.c | 8
-rw-r--r--  drivers/net/nlmon.c | 28
-rw-r--r--  drivers/net/phy/Kconfig | 17
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/at803x.c | 321
-rw-r--r--  drivers/net/phy/dp83640.c | 16
-rw-r--r--  drivers/net/phy/dp83869.c | 431
-rw-r--r--  drivers/net/phy/marvell10g.c | 25
-rw-r--r--  drivers/net/phy/mdio_bus.c | 11
-rw-r--r--  drivers/net/phy/mscc.c | 194
-rw-r--r--  drivers/net/phy/phy.c | 7
-rw-r--r--  drivers/net/phy/phy_device.c | 75
-rw-r--r--  drivers/net/phy/phylink.c | 25
-rw-r--r--  drivers/net/phy/sfp-bus.c | 92
-rw-r--r--  drivers/net/phy/sfp.c | 526
-rw-r--r--  drivers/net/slip/slip.c | 1
-rw-r--r--  drivers/net/tun.c | 36
-rw-r--r--  drivers/net/usb/ax88172a.c | 2
-rw-r--r--  drivers/net/usb/cdc_ether.c | 7
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 6
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 3
-rw-r--r--  drivers/net/usb/r8152.c | 58
-rw-r--r--  drivers/net/veth.c | 43
-rw-r--r--  drivers/net/vsockmon.c | 31
-rw-r--r--  drivers/net/vxlan.c | 24
-rw-r--r--  drivers/net/wireless/admtek/adm8211.c | 6
-rw-r--r--  drivers/net/wireless/ath/ar5523/ar5523.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 55
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 9
-rw-r--r--  drivers/net/wireless/ath/ath10k/coredump.c | 38
-rw-r--r--  drivers/net/wireless/ath/ath10k/coredump.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/debugfs_sta.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 188
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 62
-rw-r--r--  drivers/net/wireless/ath/ath10k/qmi.c | 55
-rw-r--r--  drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c | 22
-rw-r--r--  drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/sdio.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.c | 387
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.h | 30
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/usb.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.c | 82
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.h | 68
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 98
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 27
-rw-r--r--  drivers/net/wireless/ath/ath5k/eeprom.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath5k/pci.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath6kl/wmi.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 23
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 5
-rw-r--r--  drivers/net/wireless/ath/regd.c | 50
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/hal.h | 2
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 2
-rw-r--r--  drivers/net/wireless/ath/wil6210/boot_loader.h | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/debug.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/ethtool.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/fw.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/fw.h | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/fw_inc.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/interrupt.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/main.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/netdev.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/p2p.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/pcie_bus.c | 19
-rw-r--r--  drivers/net/wireless/ath/wil6210/pm.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/pmc.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/pmc.h | 17
-rw-r--r--  drivers/net/wireless/ath/wil6210/rx_reorder.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/trace.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/trace.h | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.h | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx_edma.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx_edma.h | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil6210.h | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil_crash_dump.c | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil_platform.c | 15
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil_platform.h | 13
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.c | 21
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.h | 13
-rw-r--r--  drivers/net/wireless/atmel/atmel_cs.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/b43/dma.c | 4
-rw-r--r--  drivers/net/wireless/broadcom/b43/main.c | 6
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 53
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 81
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h | 4
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h | 4
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h | 13
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c | 4
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c | 10
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c | 16
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h | 2
-rw-r--r--  drivers/net/wireless/intel/ipw2x00/libipw_rx.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/3945-mac.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/4965-mac.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/common.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/Makefile | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 85
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 25
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 287
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/acpi.h | 84
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/d3.h | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h | 514
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h | 33
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/rx.h | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/scan.h | 208
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/sta.h | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h | 82
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/tx.h | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 811
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.h | 47
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/debugfs.c | 35
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/error-dump.h | 63
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/file.h | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/img.h | 30
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 69
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-config.h | 29
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c | 891
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h | 22
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 35
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 40
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 31
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 392
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 33
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 42
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 56
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 37
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c | 16
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 27
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 577
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 189
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.h | 21
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 43
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 18
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c | 77
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 53
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 180
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 38
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 2
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 14
-rw-r--r--  drivers/net/wireless/marvell/libertas/if_sdio.c | 5
-rw-r--r--  drivers/net/wireless/marvell/libertas/mesh.c | 1
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/pcie.c | 9
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/scan.c | 14
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c | 5
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/debugfs.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/phy.c | 2
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 41
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/commands.c | 71
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/commands.h | 3
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/core.c | 23
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/core.h | 1
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/event.c | 47
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c | 12
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h | 4
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c | 38
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c | 30
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/qlink.h | 57
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 5
-rw-r--r--  drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c | 42
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 93
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | 6
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 9
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 515
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c | 9
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c | 9
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/efuse.c | 6
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/ps.c | 6
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/regd.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c | 8
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c | 21
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c | 8
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c | 9
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c | 25
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 5
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c | 8
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 11
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h | 619
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c | 31
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c | 189
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c | 17
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 8
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 9
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/usb.c | 5
-rw-r--r--  drivers/net/wireless/realtek/rtw88/Makefile | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw88/bf.c | 400
-rw-r--r--  drivers/net/wireless/realtek/rtw88/bf.h | 92
-rw-r--r--  drivers/net/wireless/realtek/rtw88/coex.c | 38
-rw-r--r--  drivers/net/wireless/realtek/rtw88/debug.c | 174
-rw-r--r--  drivers/net/wireless/realtek/rtw88/debug.h | 2
-rw-r--r--  drivers/net/wireless/realtek/rtw88/fw.c | 227
-rw-r--r--  drivers/net/wireless/realtek/rtw88/fw.h | 80
-rw-r--r--  drivers/net/wireless/realtek/rtw88/hci.h | 6
-rw-r--r--  drivers/net/wireless/realtek/rtw88/mac.c | 138
-rw-r--r--  drivers/net/wireless/realtek/rtw88/mac.h | 6
-rw-r--r--  drivers/net/wireless/realtek/rtw88/mac80211.c | 260
-rw-r--r--  drivers/net/wireless/realtek/rtw88/main.c | 320
-rw-r--r--  drivers/net/wireless/realtek/rtw88/main.h | 239
-rw-r--r--  drivers/net/wireless/realtek/rtw88/pci.c | 81
-rw-r--r--  drivers/net/wireless/realtek/rtw88/phy.c | 171
-rw-r--r--  drivers/net/wireless/realtek/rtw88/phy.h | 30
-rw-r--r--  drivers/net/wireless/realtek/rtw88/ps.c | 187
-rw-r--r--  drivers/net/wireless/realtek/rtw88/ps.h | 18
-rw-r--r--  drivers/net/wireless/realtek/rtw88/reg.h | 7
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822b.c | 469
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822b.h | 12
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822b_table.c | 829
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822b_table.h | 2
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822c.c | 376
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822c.h | 12
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822c_table.c | 94
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rx.c | 101
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rx.h | 11
-rw-r--r--  drivers/net/wireless/realtek/rtw88/sec.c | 21
-rw-r--r--  drivers/net/wireless/realtek/rtw88/sec.h | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw88/tx.c | 135
-rw-r--r--  drivers/net/wireless/realtek/rtw88/tx.h | 8
-rw-r--r--  drivers/net/wireless/realtek/rtw88/util.c | 27
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mgmt.c | 1
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_usb.c | 4
-rw-r--r--  drivers/net/wireless/st/cw1200/fwio.c | 6
-rw-r--r--  drivers/net/wireless/st/cw1200/queue.c | 3
-rw-r--r--  drivers/net/wireless/st/cw1200/scan.c | 3
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 15
-rw-r--r--  drivers/nfc/fdp/i2c.c | 2
-rw-r--r--  drivers/nfc/nfcmrvl/Kconfig | 2
-rw-r--r--  drivers/nfc/nxp-nci/i2c.c | 6
-rw-r--r--  drivers/nfc/pn533/pn533.c | 12
-rw-r--r--  drivers/nfc/pn533/pn533.h | 4
-rw-r--r--  drivers/nfc/pn533/uart.c | 13
-rw-r--r--  drivers/nfc/st21nfca/core.c | 1
-rw-r--r--  drivers/nvme/host/multipath.c | 2
-rw-r--r--  drivers/nvme/host/rdma.c | 8
-rw-r--r--  drivers/of/of_mdio.c | 4
-rw-r--r--  drivers/of/of_net.c | 16
-rw-r--r--  drivers/phy/ti/Kconfig | 4
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c | 26
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c | 21
-rw-r--r--  drivers/pinctrl/pinctrl-stmfx.c | 14
-rw-r--r--  drivers/ptp/Kconfig | 2
-rw-r--r--  drivers/ptp/ptp_chardev.c | 20
-rw-r--r--  drivers/ptp/ptp_clockmatrix.c | 4
-rw-r--r--  drivers/pwm/core.c | 9
-rw-r--r--  drivers/pwm/pwm-bcm-iproc.c | 1
-rw-r--r--  drivers/reset/core.c | 5
-rw-r--r--  drivers/s390/net/ism.h | 2
-rw-r--r--  drivers/s390/net/qeth_core.h | 10
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 82
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 1
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 80
-rw-r--r--  drivers/s390/net/qeth_ethtool.c | 2
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 26
-rw-r--r--  drivers/s390/net/qeth_l2_sys.c | 29
-rw-r--r--  drivers/s390/net/qeth_l3.h | 1
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 226
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 94
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 12
-rw-r--r--  drivers/scsi/scsi_lib.c | 3
-rw-r--r--  drivers/scsi/sd.c | 3
-rw-r--r--  drivers/scsi/sd_zbc.c | 29
-rw-r--r--  drivers/scsi/ufs/ufs_bsg.c | 4
-rw-r--r--  drivers/soc/imx/gpc.c | 8
-rw-r--r--  drivers/soundwire/Kconfig | 1
-rw-r--r--  drivers/soundwire/intel.c | 4
-rw-r--r--  drivers/soundwire/slave.c | 3
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_cm.c | 3
-rw-r--r--  drivers/thunderbolt/nhi_ops.c | 1
-rw-r--r--  drivers/thunderbolt/switch.c | 28
-rw-r--r--  drivers/usb/cdns3/gadget.c | 37
-rw-r--r--  drivers/usb/cdns3/host-export.h | 1
-rw-r--r--  drivers/usb/cdns3/host.c | 1
-rw-r--r--  drivers/usb/core/config.c | 5
-rw-r--r--  drivers/usb/dwc3/Kconfig | 1
-rw-r--r--  drivers/usb/dwc3/core.c | 3
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 2
-rw-r--r--  drivers/usb/dwc3/gadget.c | 6
-rw-r--r--  drivers/usb/gadget/composite.c | 4
-rw-r--r--  drivers/usb/gadget/configfs.c | 110
-rw-r--r--  drivers/usb/gadget/udc/atmel_usba_udc.c | 6
-rw-r--r--  drivers/usb/gadget/udc/core.c | 11
-rw-r--r--  drivers/usb/gadget/udc/fsl_udc_core.c | 2
-rw-r--r--  drivers/usb/gadget/udc/renesas_usb3.c | 11
-rw-r--r--  drivers/usb/host/xhci-debugfs.c | 24
-rw-r--r--  drivers/usb/host/xhci-ring.c | 2
-rw-r--r--  drivers/usb/host/xhci.c | 54
-rw-r--r--  drivers/usb/misc/ldusb.c | 13
-rw-r--r--  drivers/usb/mtu3/mtu3_core.c | 1
-rw-r--r--  drivers/usb/renesas_usbhs/common.c | 12
-rw-r--r--  drivers/usb/renesas_usbhs/mod_gadget.c | 4
-rw-r--r--  drivers/usb/serial/whiteheat.c | 13
-rw-r--r--  drivers/usb/serial/whiteheat.h | 2
-rw-r--r--  drivers/usb/storage/scsiglue.c | 10
-rw-r--r--  drivers/usb/storage/uas.c | 20
-rw-r--r--  drivers/usb/usbip/vhci_tx.c | 3
-rw-r--r--  drivers/vhost/vsock.c | 102
-rw-r--r--  drivers/video/fbdev/c2p_core.h | 8
-rw-r--r--  drivers/watchdog/bd70528_wdt.c | 1
-rw-r--r--  drivers/watchdog/cpwd.c | 8
-rw-r--r--  drivers/watchdog/imx_sc_wdt.c | 8
-rw-r--r--  drivers/watchdog/meson_gxbb_wdt.c | 4
-rw-r--r--  drivers/watchdog/pm8916_wdt.c | 15
-rw-r--r--  fs/afs/dir.c | 7
-rw-r--r--  fs/aio.c | 10
-rw-r--r--  fs/autofs/expire.c | 5
-rw-r--r--  fs/btrfs/inode.c | 30
-rw-r--r--  fs/btrfs/ioctl.c | 6
-rw-r--r--  fs/btrfs/space-info.c | 21
-rw-r--r--  fs/btrfs/tree-checker.c | 8
-rw-r--r--  fs/btrfs/volumes.c | 1
-rw-r--r--  fs/ceph/caps.c | 10
-rw-r--r--  fs/ceph/dir.c | 15
-rw-r--r--  fs/ceph/file.c | 44
-rw-r--r--  fs/ceph/inode.c | 1
-rw-r--r--  fs/ceph/super.c | 11
-rw-r--r--  fs/cifs/smb2ops.c | 3
-rw-r--r--  fs/cifs/smb2pdu.h | 1
-rw-r--r--  fs/configfs/symlink.c | 2
-rw-r--r--  fs/ecryptfs/inode.c | 84
-rw-r--r--  fs/exportfs/expfs.c | 31
-rw-r--r--  fs/fs-writeback.c | 9
-rw-r--r--  fs/io_uring.c | 32
-rw-r--r--  fs/namespace.c | 15
-rw-r--r--  fs/ocfs2/file.c | 134
-rw-r--r--  include/asm-generic/vdso/vsyscall.h | 7
-rw-r--r--  include/drm/drm_gem_shmem_helper.h | 13
-rw-r--r--  include/drm/drm_self_refresh_helper.h | 3
-rw-r--r--  include/dt-bindings/net/qca-ar803x.h | 13
-rw-r--r--  include/dt-bindings/net/ti-dp83869.h | 42
-rw-r--r--  include/linux/can/core.h | 1
-rw-r--r--  include/linux/can/platform/mcp251x.h | 22
-rw-r--r--  include/linux/can/rx-offload.h | 7
-rw-r--r--  include/linux/cpu.h | 30
-rw-r--r--  include/linux/icmp.h | 15
-rw-r--r--  include/linux/icmpv6.h | 14
-rw-r--r--  include/linux/idr.h | 2
-rw-r--r--  include/linux/kvm_host.h | 7
-rw-r--r--  include/linux/memory.h | 1
-rw-r--r--  include/linux/mlx5/driver.h | 16
-rw-r--r--  include/linux/mlx5/fs.h | 3
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 2
-rw-r--r--  include/linux/mm.h | 5
-rw-r--r--  include/linux/mm_types.h | 5
-rw-r--r--  include/linux/netdevice.h | 17
-rw-r--r--  include/linux/of_net.h | 7
-rw-r--r--  include/linux/page-flags.h | 20
-rw-r--r--  include/linux/phy.h | 11
-rw-r--r--  include/linux/radix-tree.h | 18
-rw-r--r--  include/linux/reset-controller.h | 4
-rw-r--r--  include/linux/reset.h | 2
-rw-r--r--  include/linux/sfp.h | 25
-rw-r--r--  include/linux/skbuff.h | 6
-rw-r--r--  include/linux/skmsg.h | 9
-rw-r--r--  include/linux/stmmac.h | 3
-rw-r--r--  include/linux/sxgbe_platform.h | 4
-rw-r--r--  include/linux/u64_stats_sync.h | 51
-rw-r--r--  include/linux/virtio_vsock.h | 18
-rw-r--r--  include/linux/vm_sockets.h | 15
-rw-r--r--  include/linux/vmw_vmci_api.h | 2
-rw-r--r--  include/net/act_api.h | 1
-rw-r--r--  include/net/af_vsock.h | 45
-rw-r--r--  include/net/arp.h | 4
-rw-r--r--  include/net/bonding.h | 3
-rw-r--r--  include/net/cfg80211.h | 4
-rw-r--r--  include/net/devlink.h | 60
-rw-r--r--  include/net/dsa.h | 18
-rw-r--r--  include/net/flow_dissector.h | 11
-rw-r--r--  include/net/fq_impl.h | 4
-rw-r--r--  include/net/gen_stats.h | 6
-rw-r--r--  include/net/mac80211.h | 22
-rw-r--r--  include/net/ndisc.h | 8
-rw-r--r--  include/net/neighbour.h | 6
-rw-r--r--  include/net/netfilter/nf_flow_table.h | 63
-rw-r--r--  include/net/netfilter/nf_tables.h | 9
-rw-r--r--  include/net/netfilter/nf_tables_offload.h | 1
-rw-r--r--  include/net/netns/sctp.h | 14
-rw-r--r--  include/net/page_pool.h | 85
-rw-r--r--  include/net/sch_generic.h | 10
-rw-r--r--  include/net/sctp/constants.h | 12
-rw-r--r--  include/net/sctp/structs.h | 13
-rw-r--r--  include/net/smc.h | 4
-rw-r--r--  include/net/sock.h | 26
-rw-r--r--  include/net/tcp.h | 12
-rw-r--r--  include/net/tls.h | 5
-rw-r--r--  include/net/vsock_addr.h | 2
-rw-r--r--  include/net/xdp_priv.h | 4
-rw-r--r--  include/net/xfrm.h | 7
-rw-r--r--  include/soc/mscc/ocelot.h | 539
-rw-r--r--  include/soc/mscc/ocelot_sys.h (renamed from drivers/net/ethernet/mscc/ocelot_sys.h) | 0
-rw-r--r--  include/trace/events/page_pool.h | 44
-rw-r--r--  include/trace/events/tcp.h | 2
-rw-r--r--  include/trace/events/xdp.h | 19
-rw-r--r--  include/uapi/linux/can.h | 2
-rw-r--r--  include/uapi/linux/can/bcm.h | 2
-rw-r--r--  include/uapi/linux/can/error.h | 2
-rw-r--r--  include/uapi/linux/can/gw.h | 2
-rw-r--r--  include/uapi/linux/can/j1939.h | 2
-rw-r--r--  include/uapi/linux/can/netlink.h | 2
-rw-r--r--  include/uapi/linux/can/raw.h | 2
-rw-r--r--  include/uapi/linux/can/vxcan.h | 2
-rw-r--r--  include/uapi/linux/devlink.h | 3
-rw-r--r--  include/uapi/linux/gen_stats.h | 5
-rw-r--r--  include/uapi/linux/lwtunnel.h | 41
-rw-r--r--  include/uapi/linux/netfilter/ipset/ip_set.h | 2
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 2
-rw-r--r--  include/uapi/linux/nl80211.h | 26
-rw-r--r--  include/uapi/linux/nvme_ioctl.h | 1
-rw-r--r--  include/uapi/linux/openvswitch.h | 4
-rw-r--r--  include/uapi/linux/pkt_sched.h | 22
-rw-r--r--  include/uapi/linux/ptp_clock.h | 5
-rw-r--r--  include/uapi/linux/sched.h | 4
-rw-r--r--  include/uapi/linux/sctp.h | 15
-rw-r--r--  include/uapi/linux/tipc.h | 21
-rw-r--r--  include/uapi/linux/tipc_netlink.h | 4
-rw-r--r--  kernel/audit_watch.c | 2
-rw-r--r--  kernel/bpf/cgroup.c | 4
-rw-r--r--  kernel/cgroup/cgroup.c | 5
-rw-r--r--  kernel/cpu.c | 27
-rw-r--r--  kernel/events/core.c | 23
-rw-r--r--  kernel/fork.c | 33
-rw-r--r--  kernel/irq/irqdomain.c | 2
-rw-r--r--  kernel/sched/core.c | 23
-rw-r--r--  kernel/sched/deadline.c | 40
-rw-r--r--  kernel/sched/fair.c | 15
-rw-r--r--  kernel/sched/idle.c | 9
-rw-r--r--  kernel/sched/rt.c | 37
-rw-r--r--  kernel/sched/sched.h | 30
-rw-r--r--  kernel/sched/stop_task.c | 18
-rw-r--r--  kernel/signal.c | 2
-rw-r--r--  kernel/stacktrace.c | 6
-rw-r--r--  kernel/time/ntp.c | 2
-rw-r--r--  kernel/time/vsyscall.c | 9
-rw-r--r--  lib/Kconfig | 1
-rw-r--r--  lib/dump_stack.c | 7
-rw-r--r--  lib/idr.c | 31
-rw-r--r--  lib/radix-tree.c | 2
-rw-r--r--  lib/test_xarray.c | 24
-rw-r--r--  lib/xarray.c | 4
-rw-r--r--  lib/xz/xz_dec_lzma2.c | 1
-rw-r--r--  mm/debug.c | 31
-rw-r--r--  mm/hugetlb_cgroup.c | 2
-rw-r--r--  mm/khugepaged.c | 35
-rw-r--r--  mm/madvise.c | 16
-rw-r--r--  mm/memcontrol.c | 25
-rw-r--r--  mm/memory_hotplug.c | 51
-rw-r--r--  mm/mempolicy.c | 14
-rw-r--r--  mm/mmu_notifier.c | 2
-rw-r--r--  mm/page_alloc.c | 17
-rw-r--r--  mm/page_io.c | 6
-rw-r--r--  mm/slab.h | 4
-rw-r--r--  mm/slub.c | 39
-rw-r--r--  mm/vmstat.c | 25
-rw-r--r--  net/atm/signaling.c | 2
-rw-r--r--  net/atm/svc.c | 2
-rw-r--r--  net/ax25/af_ax25.c | 2
-rw-r--r--  net/ax25/ax25_in.c | 2
-rw-r--r--  net/batman-adv/bat_v.c | 1
-rw-r--r--  net/batman-adv/bat_v_ogm.c | 34
-rw-r--r--  net/batman-adv/main.h | 2
-rw-r--r--  net/batman-adv/multicast.c | 2
-rw-r--r--  net/batman-adv/soft-interface.c | 5
-rw-r--r--  net/batman-adv/types.h | 3
-rw-r--r--  net/bluetooth/af_bluetooth.c | 4
-rw-r--r--  net/bluetooth/hci_request.c | 19
-rw-r--r--  net/bridge/br_device.c | 36
-rw-r--r--  net/bridge/br_fdb.c | 8
-rw-r--r--  net/bridge/br_input.c | 1
-rw-r--r--  net/bridge/br_private.h | 5
-rw-r--r--  net/bridge/netfilter/ebt_dnat.c | 19
-rw-r--r--  net/can/af_can.c | 3
-rw-r--r--  net/can/j1939/main.c | 9
-rw-r--r--  net/can/j1939/socket.c | 103
-rw-r--r--  net/can/j1939/transport.c | 56
-rw-r--r--  net/core/dev.c | 5
-rw-r--r--  net/core/devlink.c | 94
-rw-r--r--  net/core/gen_estimator.c | 4
-rw-r--r--  net/core/gen_stats.c | 12
-rw-r--r--  net/core/neighbour.c | 8
-rw-r--r--  net/core/page_pool.c | 189
-rw-r--r--  net/core/skmsg.c | 20
-rw-r--r--  net/core/xdp.c | 126
-rw-r--r--  net/dccp/ipv4.c | 2
-rw-r--r--  net/dccp/proto.c | 2
-rw-r--r--  net/decnet/af_decnet.c | 2
-rw-r--r--  net/decnet/dn_nsp_in.c | 2
-rw-r--r--  net/dsa/Kconfig | 9
-rw-r--r--  net/dsa/Makefile | 1
-rw-r--r--  net/dsa/dsa.c | 37
-rw-r--r--  net/dsa/dsa2.c | 3
-rw-r--r--  net/dsa/port.c | 13
-rw-r--r--  net/dsa/slave.c | 7
-rw-r--r--  net/dsa/tag_8021q.c | 11
-rw-r--r--  net/dsa/tag_ocelot.c | 229
-rw-r--r--  net/ethernet/eth.c | 7
-rw-r--r--  net/ipv4/af_inet.c | 2
-rw-r--r--  net/ipv4/fib_semantics.c | 2
-rw-r--r--  net/ipv4/icmp.c | 11
-rw-r--r--  net/ipv4/inet_connection_sock.c | 2
-rw-r--r--  net/ipv4/inet_diag.c | 15
-rw-r--r--  net/ipv4/inetpeer.c | 12
-rw-r--r--  net/ipv4/ip_output.c | 4
-rw-r--r--  net/ipv4/ip_tunnel_core.c | 429
-rw-r--r--  net/ipv4/ipconfig.c | 3
-rw-r--r--  net/ipv4/ipmr.c | 3
-rw-r--r--  net/ipv4/netfilter/nf_flow_table_ipv4.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_socket_ipv4.c | 10
-rw-r--r--  net/ipv4/route.c | 5
-rw-r--r--  net/ipv4/syncookies.c | 4
-rw-r--r--  net/ipv4/tcp.c | 20
-rw-r--r--  net/ipv4/tcp_diag.c | 4
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 2
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv4/xfrm4_output.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 4
-rw-r--r--  net/ipv6/netfilter/nf_flow_table_ipv6.c | 2
-rw-r--r--  net/ipv6/route.c | 22
-rw-r--r--  net/ipv6/seg6_local.c | 11
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/ipv6/xfrm6_output.c | 2
-rw-r--r--  net/llc/af_llc.c | 4
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/main.c | 2
-rw-r--r--  net/mac80211/mlme.c | 103
-rw-r--r--  net/mac80211/sta_info.c | 3
-rw-r--r--  net/mac80211/tx.c | 49
-rw-r--r--  net/netfilter/Makefile | 3
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 49
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipmac.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_net.c | 1
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netiface.c | 23
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netnet.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_proto_icmp.c | 6
-rw-r--r--  net/netfilter/nf_flow_table_core.c | 177
-rw-r--r--  net/netfilter/nf_flow_table_inet.c | 25
-rw-r--r--  net/netfilter/nf_flow_table_offload.c | 851
-rw-r--r--  net/netfilter/nf_tables_api.c | 59
-rw-r--r--  net/netfilter/nf_tables_offload.c | 98
-rw-r--r--  net/netfilter/nft_bitwise.c | 5
-rw-r--r--  net/netfilter/nft_cmp.c | 8
-rw-r--r--  net/netfilter/nft_flow_offload.c | 5
-rw-r--r--  net/netfilter/nft_meta.c | 18
-rw-r--r--  net/netfilter/nft_payload.c | 94
-rw-r--r--  net/netfilter/xt_HMARK.c | 6
-rw-r--r--  net/netfilter/xt_time.c | 19
-rw-r--r--  net/nfc/netlink.c | 2
-rw-r--r--  net/openvswitch/actions.c | 2
-rw-r--r--  net/openvswitch/datapath.c | 48
-rw-r--r--  net/openvswitch/datapath.h | 12
-rw-r--r--  net/openvswitch/flow.c | 20
-rw-r--r--  net/openvswitch/flow.h | 9
-rw-r--r--  net/openvswitch/flow_netlink.c | 87
-rw-r--r--  net/openvswitch/vport.c | 5
-rw-r--r--  net/packet/af_packet.c | 12
-rw-r--r--  net/rds/ib_cm.c | 23
-rw-r--r--  net/rose/af_rose.c | 4
-rw-r--r--  net/sched/act_api.c | 3
-rw-r--r--  net/sched/act_nat.c | 4
-rw-r--r--  net/sched/act_simple.c | 2
-rw-r--r--  net/sched/cls_api.c | 83
-rw-r--r--  net/sched/em_meta.c | 4
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sched/sch_pie.c | 120
-rw-r--r--  net/sched/sch_taprio.c | 5
-rw-r--r--  net/sctp/associola.c | 40
-rw-r--r--  net/sctp/diag.c | 4
-rw-r--r--  net/sctp/endpointola.c | 2
-rw-r--r--  net/sctp/protocol.c | 6
-rw-r--r--  net/sctp/sm_sideeffect.c | 5
-rw-r--r--  net/sctp/socket.c | 151
-rw-r--r--  net/sctp/sysctl.c | 22
-rw-r--r--  net/smc/af_smc.c | 23
-rw-r--r--  net/smc/smc_cdc.c | 3
-rw-r--r--  net/smc/smc_clc.c | 2
-rw-r--r--  net/smc/smc_close.c | 27
-rw-r--r--  net/smc/smc_core.c | 234
-rw-r--r--  net/smc/smc_core.h | 9
-rw-r--r--  net/smc/smc_ib.c | 9
-rw-r--r--  net/smc/smc_ib.h | 3
-rw-r--r--  net/smc/smc_ism.c | 22
-rw-r--r--  net/smc/smc_llc.c | 9
-rw-r--r--  net/smc/smc_pnet.c | 2
-rw-r--r--  net/smc/smc_tx.c | 2
-rw-r--r--  net/smc/smc_wr.c | 37
-rw-r--r--  net/smc/smc_wr.h | 10
-rw-r--r--  net/tipc/Kconfig | 15
-rw-r--r--  net/tipc/Makefile | 1
-rw-r--r--  net/tipc/bcast.c | 2
-rw-r--r--  net/tipc/bearer.c | 49
-rw-r--r--  net/tipc/bearer.h | 6
-rw-r--r--  net/tipc/core.c | 16
-rw-r--r--  net/tipc/core.h | 14
-rw-r--r--  net/tipc/crypto.c | 1986
-rw-r--r--  net/tipc/crypto.h | 167
-rw-r--r--  net/tipc/link.c | 50
-rw-r--r--  net/tipc/link.h | 1
-rw-r--r--  net/tipc/monitor.c | 15
-rw-r--r--  net/tipc/monitor.h | 1
-rw-r--r--  net/tipc/msg.c | 15
-rw-r--r--  net/tipc/msg.h | 46
-rw-r--r--  net/tipc/net.c | 2
-rw-r--r--  net/tipc/netlink.c | 18
-rw-r--r--  net/tipc/node.c | 351
-rw-r--r--  net/tipc/node.h | 13
-rw-r--r--  net/tipc/sysctl.c | 11
-rw-r--r--  net/tipc/udp_media.c | 1
-rw-r--r--  net/tls/tls_device.c | 10
-rw-r--r--  net/tls/tls_main.c | 2
-rw-r--r--  net/tls/tls_proc.c | 2
-rw-r--r--  net/tls/tls_sw.c | 30
-rw-r--r--  net/vmw_vsock/af_vsock.c | 390
-rw-r--r--  net/vmw_vsock/hyperv_transport.c | 72
-rw-r--r--  net/vmw_vsock/virtio_transport.c | 177
-rw-r--r--  net/vmw_vsock/virtio_transport_common.c | 176
-rw-r--r--  net/vmw_vsock/vmci_transport.c | 142
-rw-r--r--  net/vmw_vsock/vmci_transport.h | 3
-rw-r--r--  net/vmw_vsock/vmci_transport_notify.h | 1
-rw-r--r--  net/wireless/nl80211.c | 11
-rw-r--r--  net/x25/af_x25.c | 4
-rw-r--r--  net/xfrm/xfrm_input.c | 3
-rw-r--r--  net/xfrm/xfrm_interface.c | 23
-rw-r--r--  net/xfrm/xfrm_state.c | 2
-rw-r--r--  samples/bpf/Makefile | 5
-rw-r--r--  samples/bpf/README.rst | 12
-rw-r--r--  scripts/gdb/linux/symbols.py | 3
-rw-r--r--  scripts/nsdeps | 6
-rwxr-xr-x  scripts/tools-support-relr.sh | 8
-rw-r--r--  sound/core/compress_offload.c | 2
-rw-r--r--  sound/core/pcm_lib.c | 8
-rw-r--r--  sound/core/timer.c | 6
-rw-r--r--  sound/firewire/bebob/bebob_focusrite.c | 3
-rw-r--r--  sound/pci/hda/hda_intel.c | 3
-rw-r--r--  sound/pci/hda/patch_ca0132.c | 2
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 17
-rw-r--r--  sound/soc/codecs/hdac_hda.c | 2
-rw-r--r--  sound/soc/codecs/hdmi-codec.c | 12
-rw-r--r--  sound/soc/codecs/max98373.c | 4
-rw-r--r--  sound/soc/codecs/msm8916-wcd-analog.c | 4
-rw-r--r--  sound/soc/kirkwood/kirkwood-i2s.c | 11
-rw-r--r--  sound/soc/rockchip/rockchip_max98090.c | 7
-rw-r--r--  sound/soc/sh/rcar/dma.c | 4
-rw-r--r--  sound/soc/sof/debug.c | 6
-rw-r--r--  sound/soc/sof/intel/hda-stream.c | 4
-rw-r--r--  sound/soc/sof/ipc.c | 4
-rw-r--r--  sound/soc/sof/topology.c | 11
-rw-r--r--  sound/soc/stm/stm32_sai_sub.c | 12
-rw-r--r--  sound/soc/ti/sdma-pcm.c | 2
-rw-r--r--  sound/usb/endpoint.c | 3
-rw-r--r--  sound/usb/mixer.c | 4
-rw-r--r--  sound/usb/quirks.c | 4
-rw-r--r--  sound/usb/validate.c | 6
-rw-r--r--  tools/gpio/Makefile | 6
-rw-r--r--  tools/perf/perf-sys.h | 6
-rw-r--r--  tools/perf/util/hist.c | 2
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c | 8
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 9
-rw-r--r--  tools/perf/util/trace-event-parse.c | 31
-rw-r--r--  tools/perf/util/trace-event.h | 2
-rwxr-xr-x  tools/testing/selftests/bpf/test_offload.py | 25
-rw-r--r--  tools/testing/selftests/bpf/test_sysctl.c | 8
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh | 68
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh | 563
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh | 557
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh | 10
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/spectrum-2/router_scale.sh | 18
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/vxlan.sh | 8
-rwxr-xr-x  tools/testing/selftests/drivers/net/netdevsim/devlink.sh | 15
-rw-r--r--tools/testing/selftests/kvm/lib/assert.c4
-rw-r--r--tools/testing/selftests/net/Makefile2
-rwxr-xr-xtools/testing/selftests/net/altnames.sh75
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh52
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh55
-rwxr-xr-xtools/testing/selftests/net/forwarding/ethtool.sh318
-rwxr-xr-xtools/testing/selftests/net/forwarding/ethtool_lib.sh69
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh29
-rw-r--r--tools/testing/selftests/net/forwarding/tc_common.sh11
-rw-r--r--tools/testing/selftests/net/tcp_mmap.c65
-rw-r--r--tools/testing/selftests/net/tls.c108
-rwxr-xr-xtools/testing/selftests/net/traceroute.sh322
-rw-r--r--tools/testing/selftests/ptp/testptp.c53
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json145
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json250
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/basic.json325
-rw-r--r--tools/testing/selftests/vm/gup_benchmark.c2
-rw-r--r--tools/usb/usbip/libsrc/usbip_device_driver.c6
-rw-r--r--virt/kvm/kvm_main.c175
1291 files changed, 66500 insertions, 19995 deletions
diff --git a/.mailmap b/.mailmap
index 83d7e750c2fc..fd6219293057 100644
--- a/.mailmap
+++ b/.mailmap
@@ -108,6 +108,10 @@ Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
+Jayachandran C <c.jayachandran@gmail.com> <jayachandranc@netlogicmicro.com>
+Jayachandran C <c.jayachandran@gmail.com> <jchandra@broadcom.com>
+Jayachandran C <c.jayachandran@gmail.com> <jchandra@digeo.com>
+Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
Jean Tourrilhes <jt@hpl.hp.com>
<jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
Jeff Garzik <jgarzik@pretzel.yyz.us>
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 06d0931119cc..fc20cde63d1e 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -486,6 +486,8 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
/sys/devices/system/cpu/vulnerabilities/l1tf
/sys/devices/system/cpu/vulnerabilities/mds
+ /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+ /sys/devices/system/cpu/vulnerabilities/itlb_multihit
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities
diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
index 49311f3da6f2..0795e3c2643f 100644
--- a/Documentation/admin-guide/hw-vuln/index.rst
+++ b/Documentation/admin-guide/hw-vuln/index.rst
@@ -12,3 +12,5 @@ are configurable at compile, boot or run time.
spectre
l1tf
mds
+ tsx_async_abort
+ multihit
diff --git a/Documentation/admin-guide/hw-vuln/multihit.rst b/Documentation/admin-guide/hw-vuln/multihit.rst
new file mode 100644
index 000000000000..ba9988d8bce5
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/multihit.rst
@@ -0,0 +1,163 @@
+iTLB multihit
+=============
+
+iTLB multihit is an erratum where some processors may incur a machine check
+error, possibly resulting in an unrecoverable CPU lockup, when an
+instruction fetch hits multiple entries in the instruction TLB. This can
+occur when the page size is changed along with either the physical address
+or cache type. A malicious guest running on a virtualized system can
+exploit this erratum to perform a denial of service attack.
+
+
+Affected processors
+-------------------
+
+Variations of this erratum are present on most Intel Core and Xeon processor
+models. The erratum is not present on:
+
+ - non-Intel processors
+
+ - Some Atoms (Airmont, Bonnell, Goldmont, GoldmontPlus, Saltwell, Silvermont)
+
+ - Intel processors that have the PSCHANGE_MC_NO bit set in the
+ IA32_ARCH_CAPABILITIES MSR.
+
+
+Related CVEs
+------------
+
+The following CVE entry is related to this issue:
+
+ ============== =================================================
+ CVE-2018-12207 Machine Check Error Avoidance on Page Size Change
+ ============== =================================================
+
+
+Problem
+-------
+
+Privileged software, including the OS and virtual machine managers (VMM), is
+in charge of memory management. A key component in memory management is the
+control of the page tables. Modern processors use virtual memory, a technique
+that creates the illusion of a very large memory space for each process. This
+virtual space is split into pages of a given size. Page tables translate
+virtual addresses to physical addresses.
+
+To reduce latency when performing a virtual to physical address translation,
+processors include a structure, called TLB, that caches recent translations.
+There are separate TLBs for instruction (iTLB) and data (dTLB).
+
+Under this erratum, instructions are fetched from a linear address translated
+using a 4 KB translation cached in the iTLB. Privileged software modifies the
+paging structure so that the same linear address is mapped using a large page
+size (2 MB, 4 MB, 1 GB) with a different physical address or memory type.
+After the page structure modification but before the software invalidates any
+iTLB entries for the linear address, a code fetch that happens on the same
+linear address may cause a machine-check error which can result in a system
+hang or shutdown.
+
+
+Attack scenarios
+----------------
+
+Attacks against the iTLB multihit erratum can be mounted from malicious
+guests in a virtualized system.
+
+
+iTLB multihit system information
+--------------------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current iTLB
+multihit status of the system: whether the system is vulnerable and which
+mitigations are active. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/itlb_multihit
+
+The possible values in this file are:
+
+.. list-table::
+
+ * - Not affected
+ - The processor is not vulnerable.
+ * - KVM: Mitigation: Split huge pages
+ - Software changes mitigate this issue.
+ * - KVM: Vulnerable
+ - The processor is vulnerable, but no mitigation is enabled.
+
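+For example, the current state can be queried directly (the output will be
+one of the values listed above)::
+
+  $ cat /sys/devices/system/cpu/vulnerabilities/itlb_multihit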
+
+Enumeration of the erratum
+--------------------------------
+
+A new bit has been allocated in the IA32_ARCH_CAPABILITIES MSR (PSCHANGE_MC_NO)
+and will be set on CPUs which are mitigated against this issue.
+
+ ======================================= =========== ================================
+ IA32_ARCH_CAPABILITIES MSR              Not present Possibly vulnerable, check model
+ IA32_ARCH_CAPABILITIES[PSCHANGE_MC_NO]  '0'         Likely vulnerable, check model
+ IA32_ARCH_CAPABILITIES[PSCHANGE_MC_NO]  '1'         Not vulnerable
+ ======================================= =========== ================================
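+
+For example, with the msr-tools package the capability MSR can be read
+directly (a sketch; IA32_ARCH_CAPABILITIES is MSR 0x10a and PSCHANGE_MC_NO is
+bit 6 of the returned value)::
+
+  $ rdmsr 0x10a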
+
+
+Mitigation mechanism
+-------------------------
+
+This erratum can be mitigated by restricting the use of large page sizes to
+non-executable pages. This forces all iTLB entries to be 4K, and removes
+the possibility of multiple hits.
+
+In order to mitigate the vulnerability, KVM initially marks all huge pages
+as non-executable. If the guest attempts to execute in one of those pages,
+the page is broken down into 4K pages, which are then marked executable.
+
+If EPT is disabled or not available on the host, KVM is in control of TLB
+flushes and the problematic situation cannot happen. However, the shadow
+EPT paging mechanism used by nested virtualization is vulnerable, because
+the nested guest can trigger multiple iTLB hits by modifying its own
+(non-nested) page tables. For simplicity, KVM will make large pages
+non-executable in all shadow paging modes.
+
+Mitigation control on the kernel command line and KVM module parameter
+------------------------------------------------------------------------
+
+The KVM hypervisor mitigation mechanism for marking huge pages as
+non-executable can be controlled with a module parameter "nx_huge_pages=".
+The kernel command line allows controlling the iTLB multihit mitigations at
+boot time with the option "kvm.nx_huge_pages=".
+
+The valid arguments for these options are:
+
+ ========== ================================================================
+ force      Mitigation is enabled. In this case, the mitigation implements
+            non-executable huge pages in Linux kernel KVM module. All huge
+            pages in the EPT are marked as non-executable.
+            If a guest attempts to execute in one of those pages, the page is
+            broken down into 4K pages, which are then marked executable.
+
+ off        Mitigation is disabled.
+
+ auto       Enable mitigation only if the platform is affected and the kernel
+            was not booted with the "mitigations=off" command line parameter.
+            This is the default option.
+ ========== ================================================================
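+
+For example, to force the mitigation regardless of platform detection, boot
+with::
+
+  kvm.nx_huge_pages=force
+
+The active value can also be inspected at runtime (assuming KVM is loaded)
+via::
+
+  $ cat /sys/module/kvm/parameters/nx_huge_pages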
+
+
+Mitigation selection guide
+--------------------------
+
+1. No virtualization in use
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The system is protected by the kernel unconditionally and no further
+ action is required.
+
+2. Virtualization with trusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ If the guest comes from a trusted source, you may assume that the guest will
+ not attempt to maliciously exploit these errata and no further action is
+ required.
+
+3. Virtualization with untrusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ If the guest comes from an untrusted source, the host kernel will need to
+ apply the iTLB multihit mitigation via the kernel command line or the kvm
+ module parameter described above.
diff --git a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
new file mode 100644
index 000000000000..fddbd7579c53
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
@@ -0,0 +1,276 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+TAA - TSX Asynchronous Abort
+======================================
+
+TAA is a hardware vulnerability that allows unprivileged speculative access to
+data which is available in various CPU internal buffers by using asynchronous
+aborts within an Intel TSX transactional region.
+
+Affected processors
+-------------------
+
+This vulnerability only affects Intel processors that support Intel
+Transactional Synchronization Extensions (TSX) when the TAA_NO bit (bit 8)
+is 0 in the IA32_ARCH_CAPABILITIES MSR. On processors where the MDS_NO bit
+(bit 5) is 0 in the IA32_ARCH_CAPABILITIES MSR, the existing MDS mitigations
+also mitigate against TAA.
+
+Whether a processor is affected or not can be read out from the TAA
+vulnerability file in sysfs. See :ref:`tsx_async_abort_sys_info`.
+
+Related CVEs
+------------
+
+The following CVE entry is related to this TAA issue:
+
+ ============== ===== ===================================================
+ CVE-2019-11135 TAA   TSX Asynchronous Abort (TAA) condition on some
+                      microprocessors utilizing speculative execution may
+                      allow an authenticated user to potentially enable
+                      information disclosure via a side channel with
+                      local access.
+ ============== ===== ===================================================
+
+Problem
+-------
+
+When performing store, load or L1 refill operations, processors write
+data into temporary microarchitectural structures (buffers). The data in
+those buffers can be forwarded to load operations as an optimization.
+
+Intel TSX is an extension to the x86 instruction set architecture that adds
+hardware transactional memory support to improve performance of multi-threaded
+software. TSX lets the processor expose and exploit concurrency hidden in an
+application by dynamically avoiding unnecessary synchronization.
+
+TSX supports atomic memory transactions that are either committed (success) or
+aborted. During an abort, operations that happened within the transactional
+region are rolled back. An asynchronous abort takes place, among other reasons,
+when a different thread accesses a cache line that is also used within the
+transactional region and that access might lead to a data race.
+
+Immediately after an uncompleted asynchronous abort, certain speculatively
+executed loads may read data from those internal buffers and pass it to dependent
+operations. This can be then used to infer the value via a cache side channel
+attack.
+
+Because the buffers are potentially shared between Hyper-Threads, cross
+Hyper-Thread attacks are possible.
+
+The victim of a malicious actor does not need to make use of TSX. Only the
+attacker needs to begin a TSX transaction and raise an asynchronous abort
+which in turn potentially leaks data stored in the buffers.
+
+More detailed technical information is available in the TAA specific x86
+architecture section: :ref:`Documentation/x86/tsx_async_abort.rst <tsx_async_abort>`.
+
+
+Attack scenarios
+----------------
+
+Attacks against the TAA vulnerability can be implemented from unprivileged
+applications running on hosts or guests.
+
+As for MDS, the attacker has no control over the memory addresses that can
+be leaked. Only the victim is responsible for bringing data to the CPU. As
+a result, the malicious actor has to sample as much data as possible and
+then postprocess it to try to infer any useful information from it.
+
+A potential attacker only has read access to the data. Also, there is no direct
+privilege escalation by using this technique.
+
+
+.. _tsx_async_abort_sys_info:
+
+TAA system information
+-----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current TAA status
+of mitigated systems. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+
+The possible values in this file are:
+
+.. list-table::
+
+ * - 'Vulnerable'
+ - The CPU is affected by this vulnerability and the microcode and kernel mitigation are not applied.
+ * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
+ - The system tries to clear the buffers but the microcode might not support the operation.
+ * - 'Mitigation: Clear CPU buffers'
+ - The microcode has been updated to clear the buffers. TSX is still enabled.
+ * - 'Mitigation: TSX disabled'
+ - TSX is disabled.
+ * - 'Not affected'
+ - The CPU is not affected by this issue.
+
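+For example, the current status can be queried with::
+
+  $ cat /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+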
+.. _ucode_needed:
+
+Best effort mitigation mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If the processor is vulnerable, but the availability of the microcode-based
+mitigation mechanism is not advertised via CPUID, the kernel selects a best
+effort mitigation mode. This mode invokes the mitigation instructions
+without a guarantee that they clear the CPU buffers.
+
+This is done to address virtualization scenarios where the host has the
+microcode update applied, but the hypervisor is not yet updated to expose the
+CPUID to the guest. If the host has updated microcode the protection takes
+effect; otherwise a few CPU cycles are wasted pointlessly.
+
+The state in the tsx_async_abort sysfs file reflects this situation
+accordingly.
+
+
+Mitigation mechanism
+--------------------
+
+The kernel detects the affected CPUs and the presence of the microcode which is
+required. If a CPU is affected and the microcode is available, then the kernel
+enables the mitigation by default.
+
+
+The mitigation can be controlled at boot time via a kernel command line option.
+See :ref:`taa_mitigation_control_command_line`.
+
+.. _virt_mechanism:
+
+Virtualization mitigation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Affected systems where the host has the TAA microcode and TAA is mitigated by
+having disabled TSX previously are not vulnerable regardless of the status
+of the VMs.
+
+In all other cases, if the host either does not have the TAA microcode or
+the kernel is not mitigated, the system might be vulnerable.
+
+
+.. _taa_mitigation_control_command_line:
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows controlling the TAA mitigations at boot time
+with the option "tsx_async_abort=". The valid arguments for this option are:
+
+ ============ =============================================================
+ off          This option disables the TAA mitigation on affected platforms.
+              If the system has TSX enabled (see next parameter) and the CPU
+              is affected, the system is vulnerable.
+
+ full         TAA mitigation is enabled. If TSX is enabled, on an affected
+              system it will clear CPU buffers on ring transitions. On
+              systems which are MDS-affected and deploy MDS mitigation,
+              TAA is also mitigated. Specifying this option on those
+              systems will have no effect.
+
+ full,nosmt   The same as tsx_async_abort=full, with SMT disabled on
+              vulnerable CPUs that have TSX enabled. This is the complete
+              mitigation. When TSX is disabled, SMT is not disabled because
+              the CPU is not vulnerable to cross-thread TAA attacks.
+ ============ =============================================================
+
+Not specifying this option is equivalent to "tsx_async_abort=full".
+
+The kernel command line also allows controlling the TSX feature using the
+parameter "tsx=" on CPUs which support TSX control. The MSR_IA32_TSX_CTRL MSR
+is used to control the TSX feature and the enumeration of the TSX feature
+bits (RTM and HLE) in CPUID.
+
+The valid options are:
+
+ ============ =============================================================
+ off          Disables TSX on the system.
+
+              Note that this option takes effect only on newer CPUs which are
+              not vulnerable to MDS, i.e., have
+              MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1 and which get the new
+              IA32_TSX_CTRL MSR through a microcode update. This new MSR
+              allows for the reliable deactivation of the TSX functionality.
+
+ on           Enables TSX.
+
+              Although there are mitigations for all known security
+              vulnerabilities, TSX has been known to be an accelerator for
+              several previous speculation-related CVEs, and so there may be
+              unknown security risks associated with leaving it enabled.
+
+ auto         Disables TSX if X86_BUG_TAA is present, otherwise enables TSX
+              on the system.
+ ============ =============================================================
+
+Not specifying this option is equivalent to "tsx=off".
+
+The following combinations of the "tsx_async_abort" and "tsx" options are
+possible. For affected platforms, tsx=auto is equivalent to tsx=off and the
+result will be:
+
+ ========= ========================== =========================================
+ tsx=on    tsx_async_abort=full       The system will use VERW to clear CPU
+                                      buffers. Cross-thread attacks are still
+                                      possible on SMT machines.
+ tsx=on    tsx_async_abort=full,nosmt As above, but cross-thread attacks on
+                                      SMT are mitigated.
+ tsx=on    tsx_async_abort=off        The system is vulnerable.
+ tsx=off   tsx_async_abort=full       TSX might be disabled if microcode
+                                      provides a TSX control MSR. If so, the
+                                      system is not vulnerable.
+ tsx=off   tsx_async_abort=full,nosmt Same as above.
+ tsx=off   tsx_async_abort=off        Same as above.
+ ========= ========================== =========================================
+
+
+For unaffected platforms "tsx=on" and "tsx_async_abort=full" do not clear CPU
+buffers. For platforms without TSX control (MSR_IA32_ARCH_CAPABILITIES.MDS_NO=0)
+the "tsx" command line argument has no effect.
+
+For affected platforms, the table below indicates the mitigation status for
+the combinations of CPUID bit MD_CLEAR and IA32_ARCH_CAPABILITIES MSR bits
+MDS_NO and TSX_CTRL_MSR.
+
+ ======= ========= ============= ========================================
+ MDS_NO  MD_CLEAR  TSX_CTRL_MSR  Status
+ ======= ========= ============= ========================================
+ 0       0         0             Vulnerable (needs microcode)
+ 0       1         0             MDS and TAA mitigated via VERW
+ 1       1         0             MDS fixed, TAA vulnerable if TSX enabled
+                                 because MD_CLEAR has no meaning and
+                                 VERW is not guaranteed to clear buffers
+ 1       X         1             MDS fixed, TAA can be mitigated by
+                                 VERW or TSX_CTRL_MSR
+ ======= ========= ============= ========================================
+
+Mitigation selection guide
+--------------------------
+
+1. Trusted userspace and guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If all user space applications are from a trusted source and do not execute
+untrusted code which is supplied externally, then the mitigation can be
+disabled. The same applies to virtualized environments with trusted guests.
+
+
+2. Untrusted userspace and guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If there are untrusted applications or guests on the system, enabling TSX
+might allow a malicious actor to leak data from the host or from other
+processes running on the same physical core.
+
+If the microcode is available and TSX is disabled on the host, attacks
+are prevented in a virtualized environment as well, even if the VMs do not
+explicitly enable the mitigation.
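+
+For example, such a host could enable the complete mitigation by appending
+the following to its kernel command line (a sketch combining the options
+documented above)::
+
+  tsx=off tsx_async_abort=full,nosmt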
+
+
+.. _taa_default_mitigations:
+
+Default mitigations
+-------------------
+
+The kernel's default action for vulnerable processors is:
+
+ - Deploy TSX disable mitigation (tsx_async_abort=full tsx=off).
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a84a83f8881e..8dee8f68fe15 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2055,6 +2055,25 @@
KVM MMU at runtime.
Default is 0 (off)
+ kvm.nx_huge_pages=
+ [KVM] Controls the software workaround for the
+ X86_BUG_ITLB_MULTIHIT bug.
+ force : Always deploy workaround.
+ off : Never deploy workaround.
+ auto : Deploy workaround based on the presence of
+ X86_BUG_ITLB_MULTIHIT.
+
+ Default is 'auto'.
+
+ If the software workaround is enabled for the host,
+ guests need not enable it for nested guests.
+
+ kvm.nx_huge_pages_recovery_ratio=
+ [KVM] Controls how many 4KiB pages are periodically zapped
+ back to huge pages. 0 disables the recovery; otherwise, if
+ the value is N, KVM will zap 1/Nth of the 4KiB pages every
+ minute. The default is 60.
+
kvm-amd.nested= [KVM,AMD] Allow nested virtualization in KVM/SVM.
Default is 1 (enabled)
@@ -2636,6 +2655,13 @@
ssbd=force-off [ARM64]
l1tf=off [X86]
mds=off [X86]
+ tsx_async_abort=off [X86]
+ kvm.nx_huge_pages=off [X86]
+
+ Exceptions:
+ This does not have any effect on
+ kvm.nx_huge_pages when
+ kvm.nx_huge_pages=force.
auto (default)
Mitigate all CPU vulnerabilities, but leave SMT
@@ -2651,6 +2677,7 @@
be fully mitigated, even if it means losing SMT.
Equivalent to: l1tf=flush,nosmt [X86]
mds=full,nosmt [X86]
+ tsx_async_abort=full,nosmt [X86]
mminit_loglevel=
[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
@@ -4848,6 +4875,71 @@
interruptions from clocksource watchdog are not
acceptable).
+ tsx= [X86] Control Transactional Synchronization
+ Extensions (TSX) feature in Intel processors that
+ support TSX control.
+
+ This parameter controls the TSX feature. The options are:
+
+ on - Enable TSX on the system. Although there are
+ mitigations for all known security vulnerabilities,
+ TSX has been known to be an accelerator for
+ several previous speculation-related CVEs, and
+ so there may be unknown security risks associated
+ with leaving it enabled.
+
+ off - Disable TSX on the system. (Note that this
+ option takes effect only on newer CPUs which are
+ not vulnerable to MDS, i.e., have
+ MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1 and which get
+ the new IA32_TSX_CTRL MSR through a microcode
+ update. This new MSR allows for the reliable
+ deactivation of the TSX functionality.)
+
+ auto - Disable TSX if X86_BUG_TAA is present,
+ otherwise enable TSX on the system.
+
+ Not specifying this option is equivalent to tsx=off.
+
+ See Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
+ for more details.
+
+ tsx_async_abort= [X86,INTEL] Control mitigation for the TSX Async
+ Abort (TAA) vulnerability.
+
+ Similar to Micro-architectural Data Sampling (MDS),
+ certain CPUs that support Transactional
+ Synchronization Extensions (TSX) are vulnerable to an
+ exploit against CPU internal buffers which can forward
+ information to a disclosure gadget under certain
+ conditions.
+
+ In vulnerable processors, the speculatively forwarded
+ data can be used in a cache side channel attack, to
+ access data to which the attacker does not have direct
+ access.
+
+ This parameter controls the TAA mitigation. The
+ options are:
+
+ full - Enable TAA mitigation on vulnerable CPUs
+ if TSX is enabled.
+
+ full,nosmt - Enable TAA mitigation and disable SMT on
+ vulnerable CPUs. If TSX is disabled, SMT
+ is not disabled because CPU is not
+ vulnerable to cross-thread TAA attacks.
+ off - Unconditionally disable TAA mitigation
+
+ Not specifying this option is equivalent to
+ tsx_async_abort=full. On CPUs which are MDS affected
+ and deploy MDS mitigation, TAA mitigation is not
+ required and doesn't provide any additional
+ mitigation.
+
+ For details see:
+ Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
+
turbografx.map[2|3]= [HW,JOY]
TurboGraFX parallel port interface
Format:
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
index b7336b9d6a3c..48a7f916c5e4 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
@@ -44,6 +44,12 @@ Optional properties:
Admission Control Block supports reporting the number of packets in-flight in a
switch queue
+- resets: a single phandle and reset identifier pair. See
+ Documentation/devicetree/bindings/reset/reset.txt for details.
+
+- reset-names: If the "resets" property is specified, this property should have
+ the value "switch" to denote the switch reset line.
+
Port subnodes:
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
index 3956af1d30f3..33a0d67e4ce5 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
@@ -2,7 +2,7 @@
Required properties:
- compatible: should contain one of "brcm,genet-v1", "brcm,genet-v2",
- "brcm,genet-v3", "brcm,genet-v4", "brcm,genet-v5".
+ "brcm,genet-v3", "brcm,genet-v4", "brcm,genet-v5", "brcm,bcm2711-genet-v5".
- reg: address and length of the register set for the device
- interrupts and/or interrupts-extended: must be two cells, the first cell
is the general purpose interrupt line, while the second cell is the
diff --git a/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt b/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt
index 4fa00e2eafcf..c749dc297624 100644
--- a/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt
@@ -14,6 +14,7 @@ Required properties:
* "brcm,bcm4330-bt"
* "brcm,bcm43438-bt"
* "brcm,bcm4345c5"
+ * "brcm,bcm43540-bt"
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index 0e7c31794ae6..ac471b60ed6a 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -121,6 +121,11 @@ properties:
and is useful for determining certain configuration settings
such as flow control thresholds.
+ sfp:
+ $ref: /schemas/types.yaml#definitions/phandle
+ description:
+ Specifies a reference to a node representing an SFP cage.
+
tx-fifo-depth:
$ref: /schemas/types.yaml#definitions/uint32
description:
diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
index f70f18ff821f..8927941c74bb 100644
--- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
@@ -153,6 +153,11 @@ properties:
Delay after the reset was deasserted in microseconds. If
this property is missing the delay will be skipped.
+ sfp:
+ $ref: /schemas/types.yaml#definitions/phandle
+ description:
+ Specifies a reference to a node representing an SFP cage.
+
required:
- reg
diff --git a/Documentation/devicetree/bindings/net/qca,ar803x.yaml b/Documentation/devicetree/bindings/net/qca,ar803x.yaml
new file mode 100644
index 000000000000..5a6c9d20c0ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qca,ar803x.yaml
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: GPL-2.0+
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/qca,ar803x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Atheros AR803x PHY
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Florian Fainelli <f.fainelli@gmail.com>
+ - Heiner Kallweit <hkallweit1@gmail.com>
+
+description: |
+ Bindings for Qualcomm Atheros AR803x PHYs
+
+allOf:
+ - $ref: ethernet-phy.yaml#
+
+properties:
+ qca,clk-out-frequency:
+ description: Clock output frequency in Hertz.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 25000000, 50000000, 62500000, 125000000 ]
+
+ qca,clk-out-strength:
+ description: Clock output driver strength.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1, 2 ]
+
+ qca,keep-pll-enabled:
+ description: |
+ If set, keep the PLL enabled even if there is no link. Useful if you
+ want to use the clock output without an ethernet link.
+
+ Only supported on the AR8031.
+ type: boolean
+
+ vddio-supply:
+ description: |
+ RGMII I/O voltage regulator (see regulator/regulator.yaml).
+
+ The PHY supports RGMII I/O voltages of 1.5V, 1.8V and 2.5V. You can
+ either connect this to the vddio-regulator (1.5V / 1.8V) or the
+ vddh-regulator (2.5V).
+
+ Only supported on the AR8031.
+
+ vddio-regulator:
+ type: object
+ description:
+ Initial data for the VDDIO regulator. Set this to 1.5V or 1.8V.
+ allOf:
+ - $ref: /schemas/regulator/regulator.yaml
+
+ vddh-regulator:
+ type: object
+ description:
+ Dummy subnode to model the external connection of the PHY VDDH
+ regulator to VDDIO.
+ allOf:
+ - $ref: /schemas/regulator/regulator.yaml
+
+
+examples:
+ - |
+ #include <dt-bindings/net/qca-ar803x.h>
+
+ ethernet {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy-mode = "rgmii-id";
+
+ ethernet-phy@0 {
+ reg = <0>;
+
+ qca,clk-out-frequency = <125000000>;
+ qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
+
+ vddio-supply = <&vddio>;
+
+ vddio: vddio-regulator {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ - |
+ #include <dt-bindings/net/qca-ar803x.h>
+
+ ethernet {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy-mode = "rgmii-id";
+
+ ethernet-phy@0 {
+ reg = <0>;
+
+ qca,clk-out-frequency = <50000000>;
+ qca,keep-pll-enabled;
+
+ vddio-supply = <&vddh>;
+
+ vddh: vddh-regulator {
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
new file mode 100644
index 000000000000..81ae8cafabc1
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
@@ -0,0 +1,240 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/ti,cpsw-switch.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI SoC Ethernet Switch Controller (CPSW) Device Tree Bindings
+
+maintainers:
+ - Grygorii Strashko <grygorii.strashko@ti.com>
+ - Sekhar Nori <nsekhar@ti.com>
+
+description:
The 3-port switch gigabit ethernet subsystem provides ethernet packet
communication and can be configured as an ethernet switch. It provides the
gigabit media independent interface (GMII), reduced gigabit media
independent interface (RGMII), and reduced media independent interface
(RMII), as well as the management data input output (MDIO) interface for
physical layer device (PHY) management.
+
+properties:
+ compatible:
+ oneOf:
+ - const: ti,cpsw-switch
+ - items:
+ - const: ti,am335x-cpsw-switch
+ - const: ti,cpsw-switch
+ - items:
+ - const: ti,am4372-cpsw-switch
+ - const: ti,cpsw-switch
+ - items:
+ - const: ti,dra7-cpsw-switch
+ - const: ti,cpsw-switch
+
+ reg:
+ maxItems: 1
+ description:
The physical base address and size of the full CPSW module IO range
+
+ ranges: true
+
+ clocks:
+ maxItems: 1
+ description: CPSW functional clock
+
+ clock-names:
+ maxItems: 1
+ items:
+ - const: fck
+
+ interrupts:
+ items:
+ - description: RX_THRESH interrupt
+ - description: RX interrupt
+ - description: TX interrupt
+ - description: MISC interrupt
+
+ interrupt-names:
+ items:
+ - const: "rx_thresh"
+ - const: "rx"
+ - const: "tx"
+ - const: "misc"
+
+ pinctrl-names: true
+
+ syscon:
+ $ref: /schemas/types.yaml#definitions/phandle
+ description:
+ Phandle to the system control device node which provides access to
+ efuse IO range with MAC addresses
+
+
+ ethernet-ports:
+ type: object
+ properties:
+ '#address-cells':
+ const: 1
+ '#size-cells':
+ const: 0
+
+ patternProperties:
+ "^port@[0-9]+$":
+ type: object
+ minItems: 1
+ maxItems: 2
+ description: CPSW external ports
+
+ allOf:
+ - $ref: ethernet-controller.yaml#
+
+ properties:
+ reg:
+ maxItems: 1
+ enum: [1, 2]
+ description: CPSW port number
+
+ phys:
+ $ref: /schemas/types.yaml#definitions/phandle-array
+ maxItems: 1
+ description: phandle on phy-gmii-sel PHY
+
+ label:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ maxItems: 1
+ description: label associated with this port
+
+ ti,dual-emac-pvid:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maxItems: 1
+ minimum: 1
+ maximum: 1024
+ description:
Specifies the default port VID to be used to segregate
ports. The default value is the CPSW port number.
+
+ required:
+ - reg
+ - phys
+
+ mdio:
+ type: object
+ allOf:
+ - $ref: "ti,davinci-mdio.yaml#"
+ description:
+ CPSW MDIO bus.
+
+ cpts:
+ type: object
+ description:
+ The Common Platform Time Sync (CPTS) module
+
+ properties:
+ clocks:
+ maxItems: 1
+ description: CPTS reference clock
+
+ clock-names:
+ maxItems: 1
+ items:
+ - const: cpts
+
+ cpts_clock_mult:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Numerator to convert input clock ticks into ns
+
+ cpts_clock_shift:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Denominator to convert input clock ticks into ns.
Mult and shift will be calculated based on the CPTS rftclk frequency if
both cpts_clock_shift and cpts_clock_mult properties are not provided.
+
+ required:
+ - clocks
+ - clock-names
+
+required:
+ - compatible
+ - reg
+ - ranges
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - '#address-cells'
+ - '#size-cells'
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/dra7.h>
+
+ mac_sw: switch@0 {
+ compatible = "ti,dra7-cpsw-switch","ti,cpsw-switch";
+ reg = <0x0 0x4000>;
+ ranges = <0 0 0x4000>;
+ clocks = <&gmac_main_clk>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ syscon = <&scm_conf>;
pinctrl-names = "default", "sleep";
+
+ interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "rx_thresh", "rx", "tx", "misc";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpsw_port1: port@1 {
+ reg = <1>;
+ label = "port1";
+ mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 1>;
+ phy-handle = <&ethphy0_sw>;
+ phy-mode = "rgmii";
ti,dual-emac-pvid = <1>;
+ };
+
+ cpsw_port2: port@2 {
+ reg = <2>;
+ label = "wan";
+ mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 2>;
+ phy-handle = <&ethphy1_sw>;
+ phy-mode = "rgmii";
ti,dual-emac-pvid = <2>;
+ };
+ };
+
+ davinci_mdio_sw: mdio@1000 {
+ compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+ reg = <0x1000 0x100>;
+ clocks = <&gmac_clkctrl DRA7_GMAC_GMAC_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus_freq = <1000000>;
+
+ ethphy0_sw: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ ethphy1_sw: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
+
+ cpts {
+ clocks = <&gmac_clkctrl DRA7_GMAC_GMAC_CLKCTRL 25>;
+ clock-names = "cpts";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/ti,dp83869.yaml b/Documentation/devicetree/bindings/net/ti,dp83869.yaml
new file mode 100644
index 000000000000..6fe3e451da8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ti,dp83869.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2019 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/net/ti,dp83869.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: TI DP83869 ethernet PHY
+
+allOf:
+ - $ref: "ethernet-controller.yaml#"
+
+maintainers:
+ - Dan Murphy <dmurphy@ti.com>
+
+description: |
+ The DP83869HM device is a robust, fully-featured Gigabit (PHY) transceiver
+ with integrated PMD sublayers that supports 10BASE-Te, 100BASE-TX and
+ 1000BASE-T Ethernet protocols. The DP83869 also supports 1000BASE-X and
+ 100BASE-FX Fiber protocols.
+ This device interfaces to the MAC layer through Reduced GMII (RGMII) and
SGMII. The DP83869HM supports Media Conversion in Managed mode. In this mode,
+ the DP83869HM can run 1000BASE-X-to-1000BASE-T and 100BASE-FX-to-100BASE-TX
+ conversions. The DP83869HM can also support Bridge Conversion from RGMII to
+ SGMII and SGMII to RGMII.
+
Specifications about the PHY can be found at:
+ http://www.ti.com/lit/ds/symlink/dp83869hm.pdf
+
+properties:
+ reg:
+ maxItems: 1
+
+ ti,min-output-impedance:
+ type: boolean
+ description: |
+ MAC Interface Impedance control to set the programmable output impedance
+ to a minimum value (35 ohms).
+
+ ti,max-output-impedance:
+ type: boolean
+ description: |
+ MAC Interface Impedance control to set the programmable output impedance
+ to a maximum value (70 ohms).
+
+ tx-fifo-depth:
+ $ref: /schemas/types.yaml#definitions/uint32
+ description: |
Transmit FIFO depth; see dt-bindings/net/ti-dp83869.h for values.
+
+ rx-fifo-depth:
+ $ref: /schemas/types.yaml#definitions/uint32
+ description: |
Receive FIFO depth; see dt-bindings/net/ti-dp83869.h for values.
+
+ ti,clk-output-sel:
+ $ref: /schemas/types.yaml#definitions/uint32
+ description: |
+ Muxing option for CLK_OUT pin see dt-bindings/net/ti-dp83869.h for values.
+
+ ti,op-mode:
+ $ref: /schemas/types.yaml#definitions/uint32
+ description: |
+ Operational mode for the PHY. If this is not set then the operational
mode is set by the straps. See dt-bindings/net/ti-dp83869.h for values.
+
+required:
+ - reg
+
+examples:
+ - |
+ #include <dt-bindings/net/ti-dp83869.h>
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
+ tx-fifo-depth = <DP83869_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ rx-fifo-depth = <DP83869_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,op-mode = <DP83869_RGMII_COPPER_ETHERNET>;
ti,max-output-impedance;
+ ti,clk-output-sel = <DP83869_CLK_O_SEL_CHN_A_RCLK>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
index ae661e65354e..017128394a3e 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
@@ -81,6 +81,12 @@ Optional properties:
Definition: Name of external front end module used. Some valid FEM names
for example: "microsemi-lx5586", "sky85703-11"
and "sky85803" etc.
+- qcom,snoc-host-cap-8bit-quirk:
+ Usage: Optional
+ Value type: <empty>
+ Definition: Quirk specifying that the firmware expects the 8bit version
+ of the host capability QMI request
+- qcom,xo-cal-data: xo cal offset to be configured in xo trim register.
Example (to supply PCI based wifi block details):
diff --git a/Documentation/networking/device_drivers/aquantia/atlantic.txt b/Documentation/networking/device_drivers/aquantia/atlantic.txt
index d235cbaeccc6..2013fcedc2da 100644
--- a/Documentation/networking/device_drivers/aquantia/atlantic.txt
+++ b/Documentation/networking/device_drivers/aquantia/atlantic.txt
@@ -1,5 +1,5 @@
-aQuantia AQtion Driver for the aQuantia Multi-Gigabit PCI Express Family of
-Ethernet Adapters
+Marvell (Aquantia) AQtion Driver for the aQuantia Multi-Gigabit PCI Express
+Family of Ethernet Adapters
=============================================================================
Contents
@@ -325,6 +325,46 @@ Supported ethtool options
Example:
ethtool -N eth0 flow-type udp4 action 0 loc 32
+ UDP GSO hardware offload
+ ---------------------------------
+ UDP GSO allows boosting UDP tx rates by offloading UDP header allocation
+ to hardware. A special userspace socket option is required for this; it
+ can be validated with tools/testing/selftests/net/
+
+ udpgso_bench_tx -u -4 -D 10.0.1.1 -s 6300 -S 100
+
+ This will send out 100-byte UDP packets formed from a single
+ 6300-byte user buffer.
+
+ UDP GSO is configured by:
+
+ ethtool -K eth0 tx-udp-segmentation on
+
+ Private flags (testing)
+ ---------------------------------
+
+ Atlantic driver supports private flags for hardware custom features:
+
+ $ ethtool --show-priv-flags ethX
+
+ Private flags for ethX:
+ DMASystemLoopback : off
+ PKTSystemLoopback : off
+ DMANetworkLoopback : off
+ PHYInternalLoopback: off
+ PHYExternalLoopback: off
+
+ Example:
+
+ $ ethtool --set-priv-flags ethX DMASystemLoopback on
+
+ DMASystemLoopback: DMA Host loopback.
+ PKTSystemLoopback: Packet buffer host loopback.
+ DMANetworkLoopback: Network side loopback on DMA block.
+ PHYInternalLoopback: Internal loopback on the PHY.
+ PHYExternalLoopback: External loopback on the PHY (with a loopback ethernet cable).
+
+
Command Line Parameters
=======================
The following command line parameters are available on atlantic driver:
@@ -426,7 +466,7 @@ Support
If an issue is identified with the released source code on the supported
kernel with a supported adapter, email the specific information related
-to the issue to support@aquantia.com
+to the issue to aqn_support@marvell.com
License
=======
diff --git a/Documentation/networking/device_drivers/freescale/dpaa.txt b/Documentation/networking/device_drivers/freescale/dpaa.txt
index f88194f71c54..b06601ff9200 100644
--- a/Documentation/networking/device_drivers/freescale/dpaa.txt
+++ b/Documentation/networking/device_drivers/freescale/dpaa.txt
@@ -129,9 +129,9 @@ CONFIG_AQUANTIA_PHY=y
DPAA Ethernet Frame Processing
==============================
-On Rx, buffers for the incoming frames are retrieved from one of the three
-existing buffers pools. The driver initializes and seeds these, each with
-buffers of different sizes: 1KB, 2KB and 4KB.
+On Rx, buffers for the incoming frames are retrieved from the buffers found
+in the dedicated interface buffer pool. The driver initializes and seeds these
+with one page buffers.
On Tx, all transmitted frames are returned to the driver through Tx
confirmation frame queues. The driver is then responsible for freeing the
@@ -254,7 +254,7 @@ The following statistics are exported for each interface through ethtool:
The driver also exports the following information in sysfs:
- the FQ IDs for each FQ type
- /sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids
+ /sys/devices/platform/soc/<addr>.fman/<addr>.ethernet/dpaa-ethernet.<id>/net/fm<nr>-mac<nr>/fqids
- - the IDs of the buffer pools in use
- /sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
+ - the ID of the buffer pool in use
+ /sys/devices/platform/soc/<addr>.fman/<addr>.ethernet/dpaa-ethernet.<id>/net/fm<nr>-mac<nr>/bpids
diff --git a/Documentation/networking/device_drivers/mellanox/mlx5.rst b/Documentation/networking/device_drivers/mellanox/mlx5.rst
index d071c6b49e1f..7599dceba9f1 100644
--- a/Documentation/networking/device_drivers/mellanox/mlx5.rst
+++ b/Documentation/networking/device_drivers/mellanox/mlx5.rst
@@ -154,6 +154,27 @@ User command examples:
values:
cmode runtime value smfs
+enable_roce: RoCE enablement state
+----------------------------------
+RoCE enablement state controls driver support for RoCE traffic.
+When RoCE is disabled, there is no GID table; only raw ethernet QPs are
+supported, and traffic on the well-known UDP RoCE port is handled as raw
+ethernet traffic.
+
+To change the RoCE enablement state, a user must change the driverinit cmode
+value and run devlink reload.
+
+User command examples:
+
+- Disable RoCE::
+
+ $ devlink dev param set pci/0000:06:00.0 name enable_roce value false cmode driverinit
+ $ devlink dev reload pci/0000:06:00.0
+
+- Read RoCE enablement state::
+
+ $ devlink dev param show pci/0000:06:00.0 name enable_roce
+ pci/0000:06:00.0:
+ name enable_roce type generic
+ values:
+ cmode driverinit value true
Devlink health reporters
========================
diff --git a/Documentation/networking/device_drivers/ti/cpsw_switchdev.txt b/Documentation/networking/device_drivers/ti/cpsw_switchdev.txt
new file mode 100644
index 000000000000..5c8cee17fca9
--- /dev/null
+++ b/Documentation/networking/device_drivers/ti/cpsw_switchdev.txt
@@ -0,0 +1,209 @@
+* Texas Instruments CPSW switchdev based ethernet driver 2.0
+
+- Port renaming
+On older udev versions, renaming of ethX to swXpY will not be automatically
+supported.
+In order to rename via udev, first read the switch id:
+ip -d link show dev sw0p1 | grep switchid
+
+then add a udev rule of the form:
+SUBSYSTEM=="net", ACTION=="add", ATTR{phys_switch_id}==<switchid>, \
+ ATTR{phys_port_name}!="", NAME="sw0$attr{phys_port_name}"
+
+
+====================
+# Dual mac mode
+====================
+- The new (cpsw_new.c) driver operates in dual-emac mode by default, thus
+working as 2 individual network interfaces. The main differences from the
+legacy CPSW driver are:
+ - optimized promiscuous mode: P0_UNI_FLOOD (both ports) is enabled in
+addition to ALLMULTI (current port) instead of ALE_BYPASS.
+So, ports in promiscuous mode will keep the ability to do mcast and vlan
+filtering, which provides significant benefits when ports are joined to the
+same bridge without enabling "switch" mode, or to different bridges.
+ - learning is disabled on ports, as it makes little sense for
+ segregated ports - there is no forwarding in HW.
+ - enabled basic support for devlink.
+
+ devlink dev show
+ platform/48484000.switch
+
+ devlink dev param show
+ platform/48484000.switch:
+ name switch_mode type driver-specific
+ values:
+ cmode runtime value false
+ name ale_bypass type driver-specific
+ values:
+ cmode runtime value false
+
+Devlink configuration parameters
+====================
+See Documentation/networking/devlink-params-ti-cpsw-switch.txt
+
+====================
+# Bridging in dual mac mode
+====================
+The dual_mac mode requires two vids to be reserved for internal purposes,
+which, by default, equal the CPSW port numbers. As a result, the bridge has
+to be configured in vlan unaware mode or the default_pvid has to be adjusted.
+
+ ip link add name br0 type bridge
+ ip link set dev br0 type bridge vlan_filtering 0
+ echo 0 > /sys/class/net/br0/bridge/default_pvid
+ ip link set dev sw0p1 master br0
+ ip link set dev sw0p2 master br0
+ - or -
+ ip link add name br0 type bridge
+ ip link set dev br0 type bridge vlan_filtering 0
+ echo 100 > /sys/class/net/br0/bridge/default_pvid
+ ip link set dev br0 type bridge vlan_filtering 1
+ ip link set dev sw0p1 master br0
+ ip link set dev sw0p2 master br0
+
+====================
+# Enabling "switch"
+====================
+The switch mode can be enabled by setting the devlink driver parameter
+"switch_mode" to 1/true:
+ devlink dev param set platform/48484000.switch \
+ name switch_mode value 1 cmode runtime
+
+This can be done regardless of the state of the port netdev devices (UP/DOWN),
+but the port netdev devices have to be UP before joining the bridge to avoid
+overwriting the bridge configuration, as the CPSW switch driver completely
+reloads its configuration when the first port changes its state to UP.
+
+When both interfaces have joined the bridge, the CPSW switch driver will
+enable marking packets with the offload_fwd_mark flag unless "ale_bypass=0".
+
+All configuration is implemented via switchdev API.
+
+====================
+# Bridge setup
+====================
+ devlink dev param set platform/48484000.switch \
+ name switch_mode value 1 cmode runtime
+
+ ip link add name br0 type bridge
+ ip link set dev br0 type bridge ageing_time 1000
+ ip link set dev sw0p1 up
+ ip link set dev sw0p2 up
+ ip link set dev sw0p1 master br0
+ ip link set dev sw0p2 master br0
+ [*] bridge vlan add dev br0 vid 1 pvid untagged self
+
+[*] if vlan_filtering=1. where default_pvid=1
+
+=================
+# On/off STP
+=================
+ip link set dev BRDEV type bridge stp_state 1/0
+
+Note. Steps [*] are mandatory.
+
+====================
+# VLAN configuration
+====================
+bridge vlan add dev br0 vid 1 pvid untagged self <---- add cpu port to VLAN 1
+
+Note. This step is mandatory for bridge/default_pvid.
+
+=================
+# Add extra VLANs
+=================
+ 1. untagged:
+ bridge vlan add dev sw0p1 vid 100 pvid untagged master
+ bridge vlan add dev sw0p2 vid 100 pvid untagged master
+ bridge vlan add dev br0 vid 100 pvid untagged self <---- Add cpu port to VLAN100
+
+ 2. tagged:
+ bridge vlan add dev sw0p1 vid 100 master
+ bridge vlan add dev sw0p2 vid 100 master
+ bridge vlan add dev br0 vid 100 pvid tagged self <---- Add cpu port to VLAN100
+
+====
+FDBs
+====
+FDBs are automatically added on the appropriate switch port upon detection
+
+Manually adding FDBs:
+bridge fdb add aa:bb:cc:dd:ee:ff dev sw0p1 master vlan 100
+bridge fdb add aa:bb:cc:dd:ee:fe dev sw0p2 master <---- Add on all VLANs
+
+====
+MDBs
+====
+MDBs are automatically added on the appropriate switch port upon detection
+
+Manually adding MDBs:
+bridge mdb add dev br0 port sw0p1 grp 239.1.1.1 permanent vid 100
+bridge mdb add dev br0 port sw0p1 grp 239.1.1.1 permanent <---- Add on all VLANs
+
+==================
+Multicast flooding
+==================
+CPU port mcast_flooding is always on
+
+Turning flooding on/off on switch ports:
+bridge link set dev sw0p1 mcast_flood on/off
+
+==================
+Access and Trunk port
+==================
+ bridge vlan add dev sw0p1 vid 100 pvid untagged master
+ bridge vlan add dev sw0p2 vid 100 master
+
+
+ bridge vlan add dev br0 vid 100 self
+ ip link add link br0 name br0.100 type vlan id 100
+
+ Note. Setting a PVID on the bridge device itself works only for the
+ default VLAN (default_pvid).
+
+=====================
+ NFS
+=====================
+The only way for NFS to work is by chrooting to a minimal environment when
+switch configuration that will affect connectivity is needed.
+The script below assumes you are booting NFS with the eth1 interface (the
+script is hacky and is just there to prove NFS is doable).
+
+setup.sh:
+#!/bin/sh
+mkdir proc
+mount -t proc none /proc
+ifconfig br0 > /dev/null
+if [ $? -ne 0 ]; then
+ echo "Setting up bridge"
+ ip link add name br0 type bridge
+ ip link set dev br0 type bridge ageing_time 1000
+ ip link set dev br0 type bridge vlan_filtering 1
+
+ ip link set eth1 down
+ ip link set eth1 name sw0p1
+ ip link set dev sw0p1 up
+ ip link set dev sw0p2 up
+ ip link set dev sw0p2 master br0
+ ip link set dev sw0p1 master br0
+ bridge vlan add dev br0 vid 1 pvid untagged self
+ ifconfig sw0p1 0.0.0.0
+ udhcpc -i br0
+fi
+umount /proc
+
+run_nfs.sh:
+#!/bin/sh
+mkdir /tmp/root/bin -p
+mkdir /tmp/root/lib -p
+
+cp -r /lib/ /tmp/root/
+cp -r /bin/ /tmp/root/
+cp /sbin/ip /tmp/root/bin
+cp /sbin/bridge /tmp/root/bin
+cp /sbin/ifconfig /tmp/root/bin
+cp /sbin/udhcpc /tmp/root/bin
+cp /path/to/setup.sh /tmp/root/bin
+chroot /tmp/root/ busybox sh /bin/setup.sh
+
+run ./run_nfs.sh
diff --git a/Documentation/networking/devlink-params-mlx5.txt b/Documentation/networking/devlink-params-mlx5.txt
new file mode 100644
index 000000000000..5071467118bd
--- /dev/null
+++ b/Documentation/networking/devlink-params-mlx5.txt
@@ -0,0 +1,17 @@
+flow_steering_mode [DEVICE, DRIVER-SPECIFIC]
+ Controls the flow steering mode of the driver.
+ Two modes are supported:
+ 1. 'dmfs' - Device managed flow steering.
+ 2. 'smfs' - Software/Driver managed flow steering.
+ In DMFS mode, the HW steering entities are created and
+ managed through the Firmware.
+ In SMFS mode, the HW steering entities are created and
+ managed by the driver directly, without firmware
+ intervention.
+ Type: String
+ Configuration mode: runtime
+
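+ Example usage (a sketch; the PCI device name is illustrative):
+ $ devlink dev param set pci/0000:06:00.0 \
+       name flow_steering_mode value smfs cmode runtime
+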
+enable_roce [DEVICE, GENERIC]
+ Enable handling of RoCE traffic in the device.
+ Enabled by default.
+ Configuration mode: driverinit
diff --git a/Documentation/networking/devlink-params-ti-cpsw-switch.txt b/Documentation/networking/devlink-params-ti-cpsw-switch.txt
new file mode 100644
index 000000000000..4037458499f7
--- /dev/null
+++ b/Documentation/networking/devlink-params-ti-cpsw-switch.txt
@@ -0,0 +1,10 @@
+ale_bypass [DEVICE, DRIVER-SPECIFIC]
+ Enables ALE_CONTROL(4).BYPASS mode for debug purposes.
+ When enabled, all packets will be sent to the host port only.
+ Type: bool
+ Configuration mode: runtime
+
+switch_mode [DEVICE, DRIVER-SPECIFIC]
+ Enables switch mode.
+ Type: bool
+ Configuration mode: runtime
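+
+Example usage (a sketch; the platform device name follows the CPSW
+switchdev documentation):
+ $ devlink dev param set platform/48484000.switch \
+       name switch_mode value 1 cmode runtime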
diff --git a/Documentation/networking/devlink-params.txt b/Documentation/networking/devlink-params.txt
index ddba3e9b55b1..04e234e9acc9 100644
--- a/Documentation/networking/devlink-params.txt
+++ b/Documentation/networking/devlink-params.txt
@@ -65,3 +65,7 @@ reset_dev_on_drv_probe [DEVICE, GENERIC]
Reset only if device firmware can be found in the
filesystem.
Type: u8
+
+enable_roce [DEVICE, GENERIC]
+ Enable handling of RoCE traffic in the device.
+ Type: Boolean
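+
+ Example of disabling RoCE (a sketch; the device handle is
+ hypothetical; driverinit parameters take effect after a reload):
+ devlink dev param set pci/0000:06:00.0 name enable_roce \
+ 	value false cmode driverinit
+ devlink dev reload pci/0000:06:00.0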
diff --git a/Documentation/networking/devlink-trap.rst b/Documentation/networking/devlink-trap.rst
index 8e90a85f3bd5..dc9659ca06fa 100644
--- a/Documentation/networking/devlink-trap.rst
+++ b/Documentation/networking/devlink-trap.rst
@@ -162,6 +162,67 @@ be added to the following table:
- ``drop``
- Traps packets that the device decided to drop because they could not be
enqueued to a transmission queue which is full
+ * - ``non_ip``
+ - ``drop``
+ - Traps packets that the device decided to drop because they need to
+ undergo a layer 3 lookup, but are not IP or MPLS packets
+ * - ``uc_dip_over_mc_dmac``
+ - ``drop``
+ - Traps packets that the device decided to drop because they need to be
+ routed and they have a unicast destination IP and a multicast destination
+ MAC
+ * - ``dip_is_loopback_address``
+ - ``drop``
+ - Traps packets that the device decided to drop because they need to be
+ routed and their destination IP is the loopback address (i.e., 127.0.0.0/8
+ and ::1/128)
+ * - ``sip_is_mc``
+ - ``drop``
+ - Traps packets that the device decided to drop because they need to be
+ routed and their source IP is multicast (i.e., 224.0.0.0/4 and ff00::/8)
+ * - ``sip_is_loopback_address``
+ - ``drop``
+ - Traps packets that the device decided to drop because they need to be
+ routed and their source IP is the loopback address (i.e., 127.0.0.0/8 and ::1/128)
+ * - ``ip_header_corrupted``
+ - ``drop``
+ - Traps packets that the device decided to drop because they need to be
+ routed and their IP header is corrupted: wrong checksum, wrong IP version
+ or too short Internet Header Length (IHL)
+ * - ``ipv4_sip_is_limited_bc``
+ - ``drop``
+ - Traps packets that the device decided to drop because they need to be
+ routed and their source IP is limited broadcast (i.e., 255.255.255.255/32)
+ * - ``ipv6_mc_dip_reserved_scope``
+ - ``drop``
+ - Traps IPv6 packets that the device decided to drop because they need to
+ be routed and their IPv6 multicast destination IP has a reserved scope
+ (i.e., ffx0::/16)
+ * - ``ipv6_mc_dip_interface_local_scope``
+ - ``drop``
+ - Traps IPv6 packets that the device decided to drop because they need to
+ be routed and their IPv6 multicast destination IP has an interface-local scope
+ (i.e., ffx1::/16)
+ * - ``mtu_value_is_too_small``
+ - ``exception``
+ - Traps packets that should have been routed by the device, but were bigger
+ than the MTU of the egress interface
+ * - ``unresolved_neigh``
+ - ``exception``
+ - Traps packets that did not have a matching IP neighbour after routing
+ * - ``mc_reverse_path_forwarding``
+ - ``exception``
+ - Traps multicast IP packets that failed reverse-path forwarding (RPF)
+ check during multicast routing
+ * - ``reject_route``
+ - ``exception``
+ - Traps packets that hit reject routes (i.e., "unreachable", "prohibit")
+ * - ``ipv4_lpm_miss``
+ - ``exception``
+ - Traps unicast IPv4 packets that did not match any route
+ * - ``ipv6_lpm_miss``
+ - ``exception``
+ - Traps unicast IPv6 packets that did not match any route
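+
+For example, to have packets that hit reject routes trapped to the CPU
+instead of silently dropped (a sketch; the device handle is hypothetical)::
+
+    devlink trap set pci/0000:01:00.0 trap reject_route action trap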
Driver-specific Packet Traps
============================
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 8d4ad1d1ae26..099a55bd1432 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -2091,6 +2091,28 @@ pf_enable - INTEGER
Default: 1
+pf_expose - INTEGER
+ Unset or enable/disable pf (pf is short for potentially failed) state
+ exposure. Applications can control the exposure of the PF path state
+ in the SCTP_PEER_ADDR_CHANGE event and the SCTP_GET_PEER_ADDR_INFO
+ sockopt. When it is unset, no SCTP_PEER_ADDR_CHANGE event with
+ SCTP_ADDR_PF state will be sent, but the info of a SCTP_PF-state
+ transport can still be retrieved via the SCTP_GET_PEER_ADDR_INFO
+ sockopt. When it is enabled, a SCTP_PEER_ADDR_CHANGE event will be
+ sent for a transport entering SCTP_PF state, and the info of a
+ SCTP_PF-state transport can be retrieved via the
+ SCTP_GET_PEER_ADDR_INFO sockopt. When it is disabled, no
+ SCTP_PEER_ADDR_CHANGE event will be sent, and SCTP_GET_PEER_ADDR_INFO
+ returns -EACCES when trying to get the info of a SCTP_PF-state
+ transport.
+
+ 0: Unset pf state exposure, Compatible with old applications.
+
+ 1: Disable pf state exposure.
+
+ 2: Enable pf state exposure.
+
+ Default: 0
+
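+ For example, to enable pf state exposure at runtime (a sketch):
+ sysctl -w net.sctp.pf_expose=2
+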
addip_noauth_enable - BOOLEAN
Dynamic Address Reconfiguration (ADD-IP) requires the use of
authentication to protect the operations of adding or removing new
@@ -2173,6 +2195,18 @@ pf_retrans - INTEGER
Default: 0
+ps_retrans - INTEGER
+ Primary.Switchover.Max.Retrans (PSMR) is a tunable parameter defined in
+ Section 5 "Primary Path Switchover" of RFC 7829. The primary path will
+ be changed to another active path when the path error counter on the
+ old primary path exceeds PSMR, so that "the SCTP sender is allowed to
+ continue data transmission on a new working path even when the old
+ primary destination address becomes active again". Note that this
+ feature is disabled by default: 'ps_retrans' is initialized per netns
+ to 0xffff, and its value cannot be set below 'pf_retrans' via sysctl.
+
+ Default: 0xffff
+
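+ For example, to enable primary path switchover after five path errors
+ (a sketch; the value must not be lower than pf_retrans):
+ sysctl -w net.sctp.ps_retrans=5
+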
rto_initial - INTEGER
The initial round trip timeout value in milliseconds that will be used
in calculating round trip times. This is the initial time interval
diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst
index 0dd3f748239f..f914e81fd3a6 100644
--- a/Documentation/networking/tls-offload.rst
+++ b/Documentation/networking/tls-offload.rst
@@ -436,6 +436,10 @@ by the driver:
encryption.
* ``tx_tls_ooo`` - number of TX packets which were part of a TLS stream
but did not arrive in the expected order.
+ * ``tx_tls_skip_no_sync_data`` - number of TX packets which were part of
+ a TLS stream and arrived out-of-order, but skipped the HW offload routine
+ and went to the regular transmit flow as they were retransmissions of the
+ connection handshake.
* ``tx_tls_drop_no_sync_data`` - number of TX packets which were part of
a TLS stream dropped, because they arrived out of order and associated
record could not be found.
diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst
index af64c4bb4447..a8de2fbc1caa 100644
--- a/Documentation/x86/index.rst
+++ b/Documentation/x86/index.rst
@@ -27,6 +27,7 @@ x86-specific Documentation
mds
microcode
resctrl_ui
+ tsx_async_abort
usb-legacy-support
i386/index
x86_64/index
diff --git a/Documentation/x86/tsx_async_abort.rst b/Documentation/x86/tsx_async_abort.rst
new file mode 100644
index 000000000000..583ddc185ba2
--- /dev/null
+++ b/Documentation/x86/tsx_async_abort.rst
@@ -0,0 +1,117 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+TSX Async Abort (TAA) mitigation
+================================
+
+.. _tsx_async_abort:
+
+Overview
+--------
+
+TSX Async Abort (TAA) is a side channel attack on internal buffers in some
+Intel processors, similar to Microarchitectural Data Sampling (MDS). In this
+case, certain loads may speculatively pass invalid data to dependent operations
+when an asynchronous abort condition is pending in a Transactional
+Synchronization Extensions (TSX) transaction. This includes loads with no
+fault or assist condition. Such loads may speculatively expose stale data from
+the same uarch data structures as in MDS, with the same scope of exposure,
+i.e. same-thread and cross-thread. This issue affects all current processors
+that support TSX.
+
+Mitigation strategy
+-------------------
+
+a) TSX disable - one of the mitigations is to disable TSX. A new MSR
+IA32_TSX_CTRL will be available in future processors, and in current
+processors after a microcode update, which can be used to disable TSX.
+In addition, it controls the enumeration of the TSX feature bits (RTM and
+HLE) in CPUID.
+
+b) Clear CPU buffers - similar to MDS, clearing the CPU buffers mitigates this
+vulnerability. More details on this approach can be found in
+:ref:`Documentation/admin-guide/hw-vuln/mds.rst <mds>`.
+
+Kernel internal mitigation modes
+--------------------------------
+
+ ============= ============================================================
+ off Mitigation is disabled. Either the CPU is not affected or
+ tsx_async_abort=off is supplied on the kernel command line.
+
+ tsx disabled Mitigation is enabled. TSX feature is disabled by default at
+ bootup on processors that support TSX control.
+
+ verw Mitigation is enabled. CPU is affected and MD_CLEAR is
+ advertised in CPUID.
+
+ ucode needed Mitigation is enabled. CPU is affected and MD_CLEAR is not
+ advertised in CPUID. That is mainly for virtualization
+ scenarios where the host has the updated microcode but the
+ hypervisor does not expose MD_CLEAR in CPUID. It's a
+ best-effort approach without any guarantee.
+ ============= ============================================================
+
+If the CPU is affected and the "tsx_async_abort" kernel command line parameter
+is not provided, the kernel selects an appropriate mitigation depending on the
+status of the RTM and MD_CLEAR CPUID bits.
+
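+On an affected system, the selected mitigation can be inspected at runtime
+through sysfs (assuming the CPU vulnerabilities interface is available)::
+
+    cat /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+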
+The tables below indicate the impact of the tsx=on|off|auto cmdline options
+on the state of the TAA mitigation, VERW behavior and the TSX feature for
+various combinations of MSR_IA32_ARCH_CAPABILITIES bits.
+
+1. "tsx=off"
+
+========= ========= ============ ============ ============== =================== ======================
+MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=off
+---------------------------------- -------------------------------------------------------------------------
+TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation
+ after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full
+========= ========= ============ ============ ============== =================== ======================
+ 0 0 0 HW default Yes Same as MDS Same as MDS
+ 0 0 1 Invalid case Invalid case Invalid case Invalid case
+ 0 1 0 HW default No Need ucode update Need ucode update
+ 0 1 1 Disabled Yes TSX disabled TSX disabled
+ 1 X 1 Disabled X None needed None needed
+========= ========= ============ ============ ============== =================== ======================
+
+2. "tsx=on"
+
+========= ========= ============ ============ ============== =================== ======================
+MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=on
+---------------------------------- -------------------------------------------------------------------------
+TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation
+ after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full
+========= ========= ============ ============ ============== =================== ======================
+ 0 0 0 HW default Yes Same as MDS Same as MDS
+ 0 0 1 Invalid case Invalid case Invalid case Invalid case
+ 0 1 0 HW default No Need ucode update Need ucode update
+ 0 1 1 Enabled Yes None Same as MDS
+ 1 X 1 Enabled X None needed None needed
+========= ========= ============ ============ ============== =================== ======================
+
+3. "tsx=auto"
+
+========= ========= ============ ============ ============== =================== ======================
+MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=auto
+---------------------------------- -------------------------------------------------------------------------
+TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation
+ after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full
+========= ========= ============ ============ ============== =================== ======================
+ 0 0 0 HW default Yes Same as MDS Same as MDS
+ 0 0 1 Invalid case Invalid case Invalid case Invalid case
+ 0 1 0 HW default No Need ucode update Need ucode update
+ 0 1 1 Disabled Yes TSX disabled TSX disabled
+ 1 X 1 Enabled X None needed None needed
+========= ========= ============ ============ ============== =================== ======================
+
+In the tables, TSX_CTRL_MSR is a new bit in MSR_IA32_ARCH_CAPABILITIES that
+indicates whether MSR_IA32_TSX_CTRL is supported.
+
+There are two control bits in IA32_TSX_CTRL MSR:
+
+ Bit 0: When set, it disables the Restricted Transactional Memory (RTM)
+ sub-feature of TSX (it will force all transactions to abort on the
+ XBEGIN instruction).
+
+ Bit 1: When set, it disables the enumeration of the RTM and HLE features
+ (i.e. it will make CPUID(EAX=7).EBX{bit4} and
+ CPUID(EAX=7).EBX{bit11} read as 0).
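+
+With the userspace msr-tools package, the control MSR can be read directly
+(a sketch; requires the msr kernel module and a CPU that enumerates
+ARCH_CAP_TSX_CTRL_MSR)::
+
+    modprobe msr
+    rdmsr 0x122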
diff --git a/MAINTAINERS b/MAINTAINERS
index c0024b296158..760049454a23 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1182,10 +1182,10 @@ S: Maintained
F: drivers/media/i2c/aptina-pll.*
AQUANTIA ETHERNET DRIVER (atlantic)
-M: Igor Russkikh <igor.russkikh@aquantia.com>
+M: Igor Russkikh <irusskikh@marvell.com>
L: netdev@vger.kernel.org
S: Supported
-W: http://www.aquantia.com
+W: https://www.marvell.com/
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/aquantia/atlantic/
F: Documentation/networking/device_drivers/aquantia/atlantic.txt
@@ -3060,6 +3060,7 @@ M: Daniel Borkmann <daniel@iogearbox.net>
R: Martin KaFai Lau <kafai@fb.com>
R: Song Liu <songliubraving@fb.com>
R: Yonghong Song <yhs@fb.com>
+R: Andrii Nakryiko <andriin@fb.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
@@ -3267,7 +3268,6 @@ S: Maintained
F: drivers/cpufreq/bmips-cpufreq.c
BROADCOM BMIPS MIPS ARCHITECTURE
-M: Kevin Cernekee <cernekee@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
L: bcm-kernel-feedback-list@broadcom.com
L: linux-mips@vger.kernel.org
@@ -3744,7 +3744,6 @@ F: drivers/crypto/cavium/cpt/
CAVIUM THUNDERX2 ARM64 SOC
M: Robert Richter <rrichter@cavium.com>
-M: Jayachandran C <jnair@caviumnetworks.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm64/boot/dts/cavium/thunder2-99xx*
@@ -6155,10 +6154,12 @@ S: Maintained
F: Documentation/ABI/testing/sysfs-class-net-phydev
F: Documentation/devicetree/bindings/net/ethernet-phy.yaml
F: Documentation/devicetree/bindings/net/mdio*
+F: Documentation/devicetree/bindings/net/qca,ar803x.yaml
F: Documentation/networking/phy.rst
F: drivers/net/phy/
F: drivers/of/of_mdio.c
F: drivers/of/of_net.c
+F: include/dt-bindings/net/qca-ar803x.h
F: include/linux/*mdio*.h
F: include/linux/of_net.h
F: include/linux/phy.h
@@ -10531,8 +10532,12 @@ F: mm/memblock.c
F: Documentation/core-api/boot-time-mm.rst
MEMORY MANAGEMENT
+M: Andrew Morton <akpm@linux-foundation.org>
L: linux-mm@kvack.org
W: http://www.linux-mm.org
+T: quilt https://ozlabs.org/~akpm/mmotm/
+T: quilt https://ozlabs.org/~akpm/mmots/
+T: git git://github.com/hnaz/linux-mm.git
S: Maintained
F: include/linux/mm.h
F: include/linux/gfp.h
@@ -10827,6 +10832,7 @@ M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/mscc/
+F: include/soc/mscc/ocelot*
MICROSOFT SURFACE PRO 3 BUTTON DRIVER
M: Chen Yu <yu.c.chen@intel.com>
@@ -17352,6 +17358,14 @@ S: Maintained
F: drivers/input/serio/userio.c
F: include/uapi/linux/userio.h
+VITESSE FELIX ETHERNET SWITCH DRIVER
+M: Vladimir Oltean <vladimir.oltean@nxp.com>
+M: Claudiu Manoil <claudiu.manoil@nxp.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/dsa/ocelot/*
+F: net/dsa/tag_ocelot.c
+
VIVID VIRTUAL VIDEO DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -18046,6 +18060,7 @@ F: Documentation/vm/zsmalloc.rst
ZSWAP COMPRESSED SWAP CACHING
M: Seth Jennings <sjenning@redhat.com>
M: Dan Streetman <ddstreet@ieee.org>
+M: Vitaly Wool <vitaly.wool@konsulko.com>
L: linux-mm@kvack.org
S: Maintained
F: mm/zswap.c
diff --git a/Makefile b/Makefile
index 79be70bf2899..42bfda209cb8 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
@@ -917,6 +917,9 @@ ifeq ($(CONFIG_RELR),y)
LDFLAGS_vmlinux += --pack-dyn-relocs=relr
endif
+# make the checker run with the right architecture
+CHECKFLAGS += --arch=$(ARCH)
+
# insure the checker run with the right endianness
CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian)
diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
index 0aaacea1d887..820ce3b60bb6 100644
--- a/arch/arm/boot/dts/am571x-idk.dts
+++ b/arch/arm/boot/dts/am571x-idk.dts
@@ -186,3 +186,30 @@
pinctrl-1 = <&mmc2_pins_hs>;
pinctrl-2 = <&mmc2_pins_ddr_rev20 &mmc2_iodelay_ddr_conf>;
};
+
+&mac_sw {
+ pinctrl-names = "default", "sleep";
+ status = "okay";
+};
+
+&cpsw_port1 {
+ phy-handle = <&ethphy0_sw>;
+ phy-mode = "rgmii";
+ ti,dual-emac-pvid = <1>;
+};
+
+&cpsw_port2 {
+ phy-handle = <&ethphy1_sw>;
+ phy-mode = "rgmii";
+ ti,dual-emac-pvid = <2>;
+};
+
+&davinci_mdio_sw {
+ ethphy0_sw: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ ethphy1_sw: ethernet-phy@1 {
+ reg = <1>;
+ };
+};
diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
index ea1c119feaa5..c3d966904d64 100644
--- a/arch/arm/boot/dts/am572x-idk.dts
+++ b/arch/arm/boot/dts/am572x-idk.dts
@@ -27,3 +27,8 @@
pinctrl-1 = <&mmc2_pins_hs>;
pinctrl-2 = <&mmc2_pins_ddr_rev20>;
};
+
+&mac {
+ status = "okay";
+ dual_emac;
+};
diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts
index 7935d70874ce..fa0088025b2c 100644
--- a/arch/arm/boot/dts/am574x-idk.dts
+++ b/arch/arm/boot/dts/am574x-idk.dts
@@ -35,3 +35,8 @@
pinctrl-1 = <&mmc2_pins_default>;
pinctrl-2 = <&mmc2_pins_default>;
};
+
+&mac {
+ status = "okay";
+ dual_emac;
+};
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index 423855a2a2d6..398721c7201c 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -363,11 +363,6 @@
ext-clk-src;
};
-&mac {
- status = "okay";
- dual_emac;
-};
-
&cpsw_emac0 {
phy-handle = <&ethphy0>;
phy-mode = "rgmii";
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index 5cac2dd58241..37e048771b0f 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -3079,6 +3079,58 @@
phys = <&phy_gmii_sel 2>;
};
};
+
+ mac_sw: switch@0 {
+ compatible = "ti,dra7-cpsw-switch","ti,cpsw-switch";
+ reg = <0x0 0x4000>;
+ ranges = <0 0 0x4000>;
+ clocks = <&gmac_main_clk>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ syscon = <&scm_conf>;
+ status = "disabled";
+
+ interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "rx_thresh", "rx", "tx", "misc";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpsw_port1: port@1 {
+ reg = <1>;
+ label = "port1";
+ mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 1>;
+ };
+
+ cpsw_port2: port@2 {
+ reg = <2>;
+ label = "port2";
+ mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 2>;
+ };
+ };
+
+ davinci_mdio_sw: mdio@1000 {
+ compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+ clocks = <&gmac_main_clk>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus_freq = <1000000>;
+ reg = <0x1000 0x100>;
+ };
+
+ cpts {
+ clocks = <&gmac_clkctrl DRA7_GMAC_GMAC_CLKCTRL 25>;
+ clock-names = "cpts";
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi b/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
index 2a6ce87071f9..9e027b9a5f91 100644
--- a/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
+++ b/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
@@ -328,6 +328,10 @@
pinctrl-0 = <&pinctrl_pwm3>;
};
+&snvs_pwrkey {
+ status = "okay";
+};
+
&ssi2 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index f3404dd10537..cf628465cd0a 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -230,6 +230,8 @@
accelerometer@1c {
compatible = "fsl,mma8451";
reg = <0x1c>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mma8451_int>;
interrupt-parent = <&gpio6>;
interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
};
@@ -628,6 +630,12 @@
>;
};
+ pinctrl_mma8451_int: mma8451intgrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0xb0b1
+ >;
+ };
+
pinctrl_pwm3: pwm1grp {
fsl,pins = <
MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1
diff --git a/arch/arm/boot/dts/stm32mp157c-ev1.dts b/arch/arm/boot/dts/stm32mp157c-ev1.dts
index 89d29b50c3f4..91fc0a315c49 100644
--- a/arch/arm/boot/dts/stm32mp157c-ev1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ev1.dts
@@ -183,14 +183,12 @@
ov5640: camera@3c {
compatible = "ovti,ov5640";
- pinctrl-names = "default";
- pinctrl-0 = <&ov5640_pins>;
reg = <0x3c>;
clocks = <&clk_ext_camera>;
clock-names = "xclk";
DOVDD-supply = <&v2v8>;
- powerdown-gpios = <&stmfx_pinctrl 18 GPIO_ACTIVE_HIGH>;
- reset-gpios = <&stmfx_pinctrl 19 GPIO_ACTIVE_LOW>;
+ powerdown-gpios = <&stmfx_pinctrl 18 (GPIO_ACTIVE_HIGH | GPIO_PUSH_PULL)>;
+ reset-gpios = <&stmfx_pinctrl 19 (GPIO_ACTIVE_LOW | GPIO_PUSH_PULL)>;
rotation = <180>;
status = "okay";
@@ -223,15 +221,8 @@
joystick_pins: joystick {
pins = "gpio0", "gpio1", "gpio2", "gpio3", "gpio4";
- drive-push-pull;
bias-pull-down;
};
-
- ov5640_pins: camera {
- pins = "agpio2", "agpio3"; /* stmfx pins 18 & 19 */
- drive-push-pull;
- output-low;
- };
};
};
};
diff --git a/arch/arm/boot/dts/stm32mp157c.dtsi b/arch/arm/boot/dts/stm32mp157c.dtsi
index 9b11654a0a39..f98e0370c0bc 100644
--- a/arch/arm/boot/dts/stm32mp157c.dtsi
+++ b/arch/arm/boot/dts/stm32mp157c.dtsi
@@ -932,7 +932,7 @@
interrupt-names = "int0", "int1";
clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
clock-names = "hclk", "cclk";
- bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>;
+ bosch,mram-cfg = <0x0 0 0 32 0 0 2 2>;
status = "disabled";
};
@@ -945,7 +945,7 @@
interrupt-names = "int0", "int1";
clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
clock-names = "hclk", "cclk";
- bosch,mram-cfg = <0x0 0 0 32 0 0 2 2>;
+ bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
index 568b90ece342..3bec3e0a81b2 100644
--- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
@@ -192,6 +192,7 @@
vqmmc-supply = <&reg_dldo1>;
non-removable;
wakeup-source;
+ keep-power-in-suspend;
status = "okay";
brcmf: wifi@1 {
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 40d7f1a4fc45..89cce8d4bc6b 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -554,3 +554,4 @@ CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_TI_CPSW_SWITCHDEV=y
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index 865b10344ea2..0474a4b1394d 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -12,6 +12,7 @@
#include <linux/irq.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
@@ -22,7 +23,6 @@
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
-#include <linux/can/platform/mcp251x.h>
#include <linux/regulator/machine.h>
#include "generic.h"
@@ -69,8 +69,9 @@ static struct pxa2xx_spi_chip mcp251x_chip_info4 = {
.gpio_cs = ICONTROL_MCP251x_nCS4
};
-static struct mcp251x_platform_data mcp251x_info = {
- .oscillator_frequency = 16E6,
+static const struct property_entry mcp251x_properties[] = {
+ PROPERTY_ENTRY_U32("clock-frequency", 16000000),
+ {}
};
static struct spi_board_info mcp251x_board_info[] = {
@@ -79,7 +80,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.max_speed_hz = 6500000,
.bus_num = 3,
.chip_select = 0,
- .platform_data = &mcp251x_info,
+ .properties = mcp251x_properties,
.controller_data = &mcp251x_chip_info1,
.irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ1)
},
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index da113c8eefbf..b27fc7ac9cea 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -13,6 +13,7 @@
#include <linux/leds.h>
#include <linux/irq.h>
#include <linux/pm.h>
+#include <linux/property.h>
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
#include <linux/serial_8250.h>
@@ -27,7 +28,6 @@
#include <linux/platform_data/i2c-pxa.h>
#include <linux/platform_data/pca953x.h>
#include <linux/apm-emulation.h>
-#include <linux/can/platform/mcp251x.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
@@ -428,14 +428,15 @@ static struct gpiod_lookup_table can_regulator_gpiod_table = {
},
};
-static struct mcp251x_platform_data zeus_mcp2515_pdata = {
- .oscillator_frequency = 16*1000*1000,
+static const struct property_entry mcp251x_properties[] = {
+ PROPERTY_ENTRY_U32("clock-frequency", 16000000),
+ {}
};
static struct spi_board_info zeus_spi_board_info[] = {
[0] = {
.modalias = "mcp2515",
- .platform_data = &zeus_mcp2515_pdata,
+ .properties = mcp251x_properties,
.irq = PXA_GPIO_TO_IRQ(ZEUS_CAN_GPIO),
.max_speed_hz = 1*1000*1000,
.bus_num = 3,
diff --git a/arch/arm/mach-sunxi/mc_smp.c b/arch/arm/mach-sunxi/mc_smp.c
index 239084cf8192..26cbce135338 100644
--- a/arch/arm/mach-sunxi/mc_smp.c
+++ b/arch/arm/mach-sunxi/mc_smp.c
@@ -481,14 +481,18 @@ static void sunxi_mc_smp_cpu_die(unsigned int l_cpu)
static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
u32 reg;
+ int gating_bit = cpu;
pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
return -EINVAL;
+ if (is_a83t && cpu == 0)
+ gating_bit = 4;
+
/* gate processor power */
reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
- reg |= PRCM_PWROFF_GATING_REG_CORE(cpu);
+ reg |= PRCM_PWROFF_GATING_REG_CORE(gating_bit);
writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
udelay(20);
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
index d98346da01df..078a5010228c 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
@@ -127,7 +127,7 @@
status = "okay";
i2c-mux@77 {
- compatible = "nxp,pca9847";
+ compatible = "nxp,pca9547";
reg = <0x77>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index 58b8cd06cae7..23c8fad7932b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -394,7 +394,7 @@
};
sdma2: dma-controller@302c0000 {
- compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma";
+ compatible = "fsl,imx8mm-sdma", "fsl,imx8mq-sdma";
reg = <0x302c0000 0x10000>;
interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MM_CLK_SDMA2_ROOT>,
@@ -405,7 +405,7 @@
};
sdma3: dma-controller@302b0000 {
- compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma";
+ compatible = "fsl,imx8mm-sdma", "fsl,imx8mq-sdma";
reg = <0x302b0000 0x10000>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MM_CLK_SDMA3_ROOT>,
@@ -737,7 +737,7 @@
};
sdma1: dma-controller@30bd0000 {
- compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma";
+ compatible = "fsl,imx8mm-sdma", "fsl,imx8mq-sdma";
reg = <0x30bd0000 0x10000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MM_CLK_SDMA1_ROOT>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index 98496f570720..43c4db312146 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -288,7 +288,7 @@
};
sdma3: dma-controller@302b0000 {
- compatible = "fsl,imx8mn-sdma", "fsl,imx7d-sdma";
+ compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma";
reg = <0x302b0000 0x10000>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SDMA3_ROOT>,
@@ -299,7 +299,7 @@
};
sdma2: dma-controller@302c0000 {
- compatible = "fsl,imx8mn-sdma", "fsl,imx7d-sdma";
+ compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma";
reg = <0x302c0000 0x10000>;
interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SDMA2_ROOT>,
@@ -612,7 +612,7 @@
};
sdma1: dma-controller@30bd0000 {
- compatible = "fsl,imx8mn-sdma", "fsl,imx7d-sdma";
+ compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma";
reg = <0x30bd0000 0x10000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SDMA1_ROOT>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
index 087b5b6ebe89..32ce14936b01 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
@@ -88,7 +88,7 @@
regulator-name = "0V9_ARM";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1000000>;
- gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+ gpios = <&gpio3 16 GPIO_ACTIVE_HIGH>;
states = <1000000 0x1
900000 0x0>;
regulator-always-on;
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 8330810f699e..565aa45ef134 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -283,23 +283,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
set_pte(ptep, pte);
}
-#define __HAVE_ARCH_PTE_SAME
-static inline int pte_same(pte_t pte_a, pte_t pte_b)
-{
- pteval_t lhs, rhs;
-
- lhs = pte_val(pte_a);
- rhs = pte_val(pte_b);
-
- if (pte_present(pte_a))
- lhs &= ~PTE_RDONLY;
-
- if (pte_present(pte_b))
- rhs &= ~PTE_RDONLY;
-
- return (lhs == rhs);
-}
-
/*
* Huge pte definitions.
*/
diff --git a/arch/arm64/include/asm/vdso/vsyscall.h b/arch/arm64/include/asm/vdso/vsyscall.h
index 0c731bfc7c8c..0c20a7c1bee5 100644
--- a/arch/arm64/include/asm/vdso/vsyscall.h
+++ b/arch/arm64/include/asm/vdso/vsyscall.h
@@ -31,13 +31,6 @@ int __arm64_get_clock_mode(struct timekeeper *tk)
#define __arch_get_clock_mode __arm64_get_clock_mode
static __always_inline
-int __arm64_use_vsyscall(struct vdso_data *vdata)
-{
- return !vdata[CS_HRES_COARSE].clock_mode;
-}
-#define __arch_use_vsyscall __arm64_use_vsyscall
-
-static __always_inline
void __arm64_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk)
{
vdata[CS_HRES_COARSE].mask = VDSO_PRECISION_MASK;
diff --git a/arch/mips/include/asm/vdso/vsyscall.h b/arch/mips/include/asm/vdso/vsyscall.h
index 195314732233..00d41b94ba31 100644
--- a/arch/mips/include/asm/vdso/vsyscall.h
+++ b/arch/mips/include/asm/vdso/vsyscall.h
@@ -28,13 +28,6 @@ int __mips_get_clock_mode(struct timekeeper *tk)
}
#define __arch_get_clock_mode __mips_get_clock_mode
-static __always_inline
-int __mips_use_vsyscall(struct vdso_data *vdata)
-{
- return (vdata[CS_HRES_COARSE].clock_mode != VDSO_CLOCK_NONE);
-}
-#define __arch_use_vsyscall __mips_use_vsyscall
-
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/mips/sgi-ip27/Kconfig b/arch/mips/sgi-ip27/Kconfig
index ef3847e7aee0..e5b6cadbec85 100644
--- a/arch/mips/sgi-ip27/Kconfig
+++ b/arch/mips/sgi-ip27/Kconfig
@@ -38,10 +38,3 @@ config REPLICATE_KTEXT
Say Y here to enable replicating the kernel text across multiple
nodes in a NUMA cluster. This trades memory for speed.
-config REPLICATE_EXHANDLERS
- bool "Exception handler replication support"
- depends on SGI_IP27
- help
- Say Y here to enable replicating the kernel exception handlers
- across multiple nodes in a NUMA cluster. This trades memory for
- speed.
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index 59d5375c9021..79a52c472782 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -69,23 +69,14 @@ static void per_hub_init(cnodeid_t cnode)
hub_rtc_init(cnode);
-#ifdef CONFIG_REPLICATE_EXHANDLERS
- /*
- * If this is not a headless node initialization,
- * copy over the caliased exception handlers.
- */
- if (get_compact_nodeid() == cnode) {
- extern char except_vec2_generic, except_vec3_generic;
- extern void build_tlb_refill_handler(void);
-
- memcpy((void *)(CKSEG0 + 0x100), &except_vec2_generic, 0x80);
- memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x80);
- build_tlb_refill_handler();
- memcpy((void *)(CKSEG0 + 0x100), (void *) CKSEG0, 0x80);
- memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x100);
+ if (nasid) {
+ /* copy exception handlers from first node to current node */
+ memcpy((void *)NODE_OFFSET_TO_K0(nasid, 0),
+ (void *)CKSEG0, 0x200);
__flush_cache_all();
+ /* switch to node local exception handlers */
+ REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
}
-#endif
}
void per_cpu_init(void)
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index fb077a947575..8624a885d95b 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -332,11 +332,7 @@ static void __init mlreset(void)
* thinks it is a node 0 address.
*/
REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
-#ifdef CONFIG_REPLICATE_EXHANDLERS
- REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
-#else
REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
-#endif
#ifdef LATER
/*
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 677e9babef80..f9dc597b0b86 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -91,6 +91,7 @@
static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
{
+ addr &= 0xf0000000; /* align addr to start of segment */
barrier(); /* make sure thread.kuap is updated before playing with SRs */
while (addr < end) {
mtsrin(sr, addr);
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 409c9bfb43d9..57c229a86f08 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -175,4 +175,7 @@ do { \
ARCH_DLINFO_CACHE_GEOMETRY; \
} while (0)
+/* Relocate the kernel image to @final_address */
+void relocate(unsigned long final_address);
+
#endif /* _ASM_POWERPC_ELF_H */
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index a4e7762dd286..100f1b57ec2f 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -3249,7 +3249,20 @@ static void setup_secure_guest(unsigned long kbase, unsigned long fdt)
/* Switch to secure mode. */
prom_printf("Switching to secure mode.\n");
+ /*
+ * The ultravisor will do an integrity check of the kernel image but we
+ * relocated it so the check will fail. Restore the original image by
+ * relocating it back to the kernel virtual base address.
+ */
+ if (IS_ENABLED(CONFIG_RELOCATABLE))
+ relocate(KERNELBASE);
+
ret = enter_secure_mode(kbase, fdt);
+
+ /* Relocate the kernel again. */
+ if (IS_ENABLED(CONFIG_RELOCATABLE))
+ relocate(kbase);
+
if (ret != U_SUCCESS) {
prom_printf("Returned %d from switching to secure mode.\n", ret);
prom_rtas_os_term("Switch to secure mode failed.\n");
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 78bab17b1396..b183ab9c5107 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -26,7 +26,8 @@ _end enter_prom $MEM_FUNCS reloc_offset __secondary_hold
__secondary_hold_acknowledge __secondary_hold_spinloop __start
logo_linux_clut224 btext_prepare_BAT
reloc_got2 kernstart_addr memstart_addr linux_banner _stext
-__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
+__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC.
+relocate"
NM="$1"
OBJ="$2"
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 02a59946a78a..be3517ef0574 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -1142,6 +1142,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
}
/*
+ * If we have seen a tail call, we need a second pass.
+ * This is because bpf_jit_emit_common_epilogue() is called
+ * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
+ */
+ if (cgctx.seen & SEEN_TAILCALL) {
+ cgctx.idx = 0;
+ if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
+ fp = org_fp;
+ goto out_addrs;
+ }
+ }
+
+ /*
* Pretend to build prologue, given the features we've seen. This will
* update ctgtx.idx as it pretends to output instructions, then we can
* calculate total size from idx.
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 6bc24a47e9ef..6f300ab7f0e9 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -42,7 +42,7 @@ void pnv_pcibios_bus_add_device(struct pci_dev *pdev)
{
struct pci_dn *pdn = pci_get_pdn(pdev);
- if (eeh_has_flag(EEH_FORCE_DISABLED))
+ if (!pdn || eeh_has_flag(EEH_FORCE_DISABLED))
return;
dev_dbg(&pdev->dev, "EEH: Setting up device\n");
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index fbd6e6b7bbf2..13e251699346 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -146,20 +146,25 @@ static int pnv_smp_cpu_disable(void)
return 0;
}
+static void pnv_flush_interrupts(void)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (xive_enabled())
+ xive_flush_interrupt();
+ else
+ icp_opal_flush_interrupt();
+ } else {
+ icp_native_flush_interrupt();
+ }
+}
+
static void pnv_smp_cpu_kill_self(void)
{
+ unsigned long srr1, unexpected_mask, wmask;
unsigned int cpu;
- unsigned long srr1, wmask;
u64 lpcr_val;
/* Standard hot unplug procedure */
- /*
- * This hard disables local interurpts, ensuring we have no lazy
- * irqs pending.
- */
- WARN_ON(irqs_disabled());
- hard_irq_disable();
- WARN_ON(lazy_irq_pending());
idle_task_exit();
current->active_mm = NULL; /* for sanity */
@@ -173,6 +178,27 @@ static void pnv_smp_cpu_kill_self(void)
wmask = SRR1_WAKEMASK_P8;
/*
+ * This turns the irq soft-disabled state we're called with, into a
+ * hard-disabled state with pending irq_happened interrupts cleared.
+ *
+ * PACA_IRQ_DEC - Decrementer should be ignored.
+ * PACA_IRQ_HMI - Can be ignored, processing is done in real mode.
+ * PACA_IRQ_DBELL, EE, PMI - Unexpected.
+ */
+ hard_irq_disable();
+ if (generic_check_cpu_restart(cpu))
+ goto out;
+
+ unexpected_mask = ~(PACA_IRQ_DEC | PACA_IRQ_HMI | PACA_IRQ_HARD_DIS);
+ if (local_paca->irq_happened & unexpected_mask) {
+ if (local_paca->irq_happened & PACA_IRQ_EE)
+ pnv_flush_interrupts();
+ DBG("CPU%d Unexpected exit while offline irq_happened=%lx!\n",
+ cpu, local_paca->irq_happened);
+ }
+ local_paca->irq_happened = PACA_IRQ_HARD_DIS;
+
+ /*
* We don't want to take decrementer interrupts while we are
* offline, so clear LPCR:PECE1. We keep PECE2 (and
* LPCR_PECE_HVEE on P9) enabled so as to let IPIs in.
@@ -197,6 +223,7 @@ static void pnv_smp_cpu_kill_self(void)
srr1 = pnv_cpu_offline(cpu);
+ WARN_ON_ONCE(!irqs_disabled());
WARN_ON(lazy_irq_pending());
/*
@@ -212,13 +239,7 @@ static void pnv_smp_cpu_kill_self(void)
*/
if (((srr1 & wmask) == SRR1_WAKEEE) ||
((srr1 & wmask) == SRR1_WAKEHVI)) {
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- if (xive_enabled())
- xive_flush_interrupt();
- else
- icp_opal_flush_interrupt();
- } else
- icp_native_flush_interrupt();
+ pnv_flush_interrupts();
} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -266,7 +287,7 @@ static void pnv_smp_cpu_kill_self(void)
*/
lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
-
+out:
DBG("CPU%d coming online...\n", cpu);
}
diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h
index d827b5b9a32c..eaaefeceef6f 100644
--- a/arch/s390/include/asm/unwind.h
+++ b/arch/s390/include/asm/unwind.h
@@ -35,6 +35,7 @@ struct unwind_state {
struct task_struct *task;
struct pt_regs *regs;
unsigned long sp, ip;
+ bool reuse_sp;
int graph_idx;
bool reliable;
bool error;
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index b9d8fe45737a..8f8456816d83 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -69,18 +69,26 @@ DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
- unsigned long long now, idle_time, idle_enter, idle_exit;
unsigned int seq;
do {
- now = get_tod_clock();
seq = read_seqcount_begin(&idle->seqcount);
idle_time = READ_ONCE(idle->idle_time);
idle_enter = READ_ONCE(idle->clock_idle_enter);
idle_exit = READ_ONCE(idle->clock_idle_exit);
} while (read_seqcount_retry(&idle->seqcount, seq));
- idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
+ in_idle = 0;
+ now = get_tod_clock();
+ if (idle_enter) {
+ if (idle_exit) {
+ in_idle = idle_exit - idle_enter;
+ } else if (now > idle_enter) {
+ in_idle = now - idle_enter;
+ }
+ }
+ idle_time += in_idle;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
@@ -88,17 +96,24 @@ DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
u64 arch_cpu_idle_time(int cpu)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
- unsigned long long now, idle_enter, idle_exit;
+ unsigned long long now, idle_enter, idle_exit, in_idle;
unsigned int seq;
do {
- now = get_tod_clock();
seq = read_seqcount_begin(&idle->seqcount);
idle_enter = READ_ONCE(idle->clock_idle_enter);
idle_exit = READ_ONCE(idle->clock_idle_exit);
} while (read_seqcount_retry(&idle->seqcount, seq));
-
- return cputime_to_nsecs(idle_enter ? ((idle_exit ?: now) - idle_enter) : 0);
+ in_idle = 0;
+ now = get_tod_clock();
+ if (idle_enter) {
+ if (idle_exit) {
+ in_idle = idle_exit - idle_enter;
+ } else if (now > idle_enter) {
+ in_idle = now - idle_enter;
+ }
+ }
+ return cputime_to_nsecs(in_idle);
}
void arch_cpu_idle_enter(void)
diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c
index 8fc9daae47a2..a8204f952315 100644
--- a/arch/s390/kernel/unwind_bc.c
+++ b/arch/s390/kernel/unwind_bc.c
@@ -46,10 +46,15 @@ bool unwind_next_frame(struct unwind_state *state)
regs = state->regs;
if (unlikely(regs)) {
- sp = READ_ONCE_NOCHECK(regs->gprs[15]);
- if (unlikely(outside_of_stack(state, sp))) {
- if (!update_stack_info(state, sp))
- goto out_err;
+ if (state->reuse_sp) {
+ sp = state->sp;
+ state->reuse_sp = false;
+ } else {
+ sp = READ_ONCE_NOCHECK(regs->gprs[15]);
+ if (unlikely(outside_of_stack(state, sp))) {
+ if (!update_stack_info(state, sp))
+ goto out_err;
+ }
}
sf = (struct stack_frame *) sp;
ip = READ_ONCE_NOCHECK(sf->gprs[8]);
@@ -107,9 +112,9 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
{
struct stack_info *info = &state->stack_info;
unsigned long *mask = &state->stack_mask;
+ bool reliable, reuse_sp;
struct stack_frame *sf;
unsigned long ip;
- bool reliable;
memset(state, 0, sizeof(*state));
state->task = task;
@@ -134,10 +139,12 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
if (regs) {
ip = READ_ONCE_NOCHECK(regs->psw.addr);
reliable = true;
+ reuse_sp = true;
} else {
sf = (struct stack_frame *) sp;
ip = READ_ONCE_NOCHECK(sf->gprs[8]);
reliable = false;
+ reuse_sp = false;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -151,5 +158,6 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
state->sp = sp;
state->ip = ip;
state->reliable = reliable;
+ state->reuse_sp = reuse_sp;
}
EXPORT_SYMBOL_GPL(__unwind_start);
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 510a18299196..a51c892f14f3 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -298,16 +298,16 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
}
if (write) {
- len = *lenp;
- if (copy_from_user(buf, buffer,
- len > sizeof(buf) ? sizeof(buf) : len))
+ len = min(*lenp, sizeof(buf));
+ if (copy_from_user(buf, buffer, len))
return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
+ buf[len - 1] = '\0';
cmm_skip_blanks(buf, &p);
nr = simple_strtoul(p, &p, 0);
cmm_skip_blanks(p, &p);
seconds = simple_strtoul(p, &p, 0);
cmm_set_timeout(nr, seconds);
+ *ppos += *lenp;
} else {
len = sprintf(buf, "%ld %ld\n",
cmm_timeout_pages, cmm_timeout_seconds);
@@ -315,9 +315,9 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
len = *lenp;
if (copy_to_user(buffer, buf, len))
return -EFAULT;
+ *lenp = len;
+ *ppos += len;
}
- *lenp = len;
- *ppos += len;
return 0;
}
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
index 324a23947585..997ffe46e953 100644
--- a/arch/sparc/vdso/Makefile
+++ b/arch/sparc/vdso/Makefile
@@ -65,14 +65,14 @@ $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(SPARC_REG_CFLAGS
#
# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
#
-CFLAGS_REMOVE_vdso-note.o = -pg
CFLAGS_REMOVE_vclock_gettime.o = -pg
+CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg
$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
-CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
+CPPFLAGS_vdso32/vdso32.lds = $(CPPFLAGS_vdso.lds)
VDSO_LDFLAGS_vdso32.lds = -m elf32_sparc -soname linux-gate.so.1
#This makes sure the $(obj) subdirectory exists even though vdso32/
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d6e1faa28c58..8ef85139553f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1940,6 +1940,51 @@ config X86_INTEL_MEMORY_PROTECTION_KEYS
If unsure, say y.
+choice
+ prompt "TSX enable mode"
+ depends on CPU_SUP_INTEL
+ default X86_INTEL_TSX_MODE_OFF
+ help
+ Intel's TSX (Transactional Synchronization Extensions) feature
+ allows optimizing locking protocols through lock elision, which
+ can lead to a noticeable performance boost.
+
+ On the other hand, it has been shown that TSX can be exploited
+ to form side channel attacks (e.g. TAA), and chances are there
+ will be more of those attacks discovered in the future.
+
+ Therefore TSX is not enabled by default (aka tsx=off). An admin
+ might override this decision via the tsx=on command line parameter.
+ Even with TSX enabled, the kernel will attempt to enable the best
+ possible TAA mitigation setting depending on the microcode available
+ for the particular machine.
+
+ This option allows setting the default tsx mode between tsx=on, =off
+ and =auto. See Documentation/admin-guide/kernel-parameters.txt for
+ more details.
+
+ Say off if not sure; say auto if TSX is in use but should only be
+ enabled on safe platforms; say on if TSX is in use and the security
+ aspect of TSX is not relevant.
+
+config X86_INTEL_TSX_MODE_OFF
+ bool "off"
+ help
+ TSX is disabled if possible - equivalent to the tsx=off command
+ line parameter.
+
+config X86_INTEL_TSX_MODE_ON
+ bool "on"
+ help
+ TSX is always enabled on TSX capable HW - equivalent to the tsx=on
+ command line parameter.
+
+config X86_INTEL_TSX_MODE_AUTO
+ bool "auto"
+ help
+ TSX is enabled on TSX capable HW that is believed to be safe against
+ side channel attacks - equivalent to the tsx=auto command line
+ parameter.
+endchoice
+
config EFI
bool "EFI runtime service support"
depends on ACPI
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 0652d3eed9bd..c4fbe379cc0b 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -399,5 +399,7 @@
#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
+#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
+#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 24d6598dea29..4fc61483919a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -312,9 +312,12 @@ struct kvm_rmap_head {
struct kvm_mmu_page {
struct list_head link;
struct hlist_node hash_link;
+ struct list_head lpage_disallowed_link;
+
bool unsync;
u8 mmu_valid_gen;
bool mmio_cached;
+ bool lpage_disallowed; /* Can't be replaced by an equiv large page */
/*
* The following two entries are used to key the shadow page in the
@@ -859,6 +862,7 @@ struct kvm_arch {
*/
struct list_head active_mmu_pages;
struct list_head zapped_obsolete_pages;
+ struct list_head lpage_disallowed_mmu_pages;
struct kvm_page_track_notifier_node mmu_sp_tracker;
struct kvm_page_track_notifier_head track_notifier_head;
@@ -933,6 +937,7 @@ struct kvm_arch {
bool exception_payload_enabled;
struct kvm_pmu_event_filter *pmu_event_filter;
+ struct task_struct *nx_lpage_recovery_thread;
};
struct kvm_vm_stat {
@@ -946,6 +951,7 @@ struct kvm_vm_stat {
ulong mmu_unsync;
ulong remote_tlb_flush;
ulong lpages;
+ ulong nx_lpage_splits;
ulong max_mmu_page_hash_collisions;
};
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 20ce682a2540..6a3124664289 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -93,6 +93,18 @@
* Microarchitectural Data
* Sampling (MDS) vulnerabilities.
*/
+#define ARCH_CAP_PSCHANGE_MC_NO BIT(6) /*
+ * The processor is not susceptible to a
+ * machine check error due to modifying the
+ * code page size along with either the
+ * physical address or cache type
+ * without TLB invalidation.
+ */
+#define ARCH_CAP_TSX_CTRL_MSR BIT(7) /* MSR for TSX control is available. */
+#define ARCH_CAP_TAA_NO BIT(8) /*
+ * Not susceptible to
+ * TSX Async Abort (TAA) vulnerabilities.
+ */
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
@@ -103,6 +115,10 @@
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
+#define MSR_IA32_TSX_CTRL 0x00000122
+#define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */
+#define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */
+
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
#define MSR_IA32_SYSENTER_EIP 0x00000176
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 80bc209c0708..5c24a7b35166 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -314,7 +314,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
#include <asm/segment.h>
/**
- * mds_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
*
* This uses the otherwise unused and obsolete VERW instruction in
* combination with microcode which triggers a CPU buffer flush when the
@@ -337,7 +337,7 @@ static inline void mds_clear_cpu_buffers(void)
}
/**
- * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
*
* Clear CPU buffers if the corresponding static key is enabled
*/
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6e0a3b43d027..54f5d54280f6 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -988,4 +988,11 @@ enum mds_mitigations {
MDS_MITIGATION_VMWERV,
};
+enum taa_mitigations {
+ TAA_MITIGATION_OFF,
+ TAA_MITIGATION_UCODE_NEEDED,
+ TAA_MITIGATION_VERW,
+ TAA_MITIGATION_TSX_DISABLED,
+};
+
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 9e2dd2b296cd..2b0faf86da1b 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1586,9 +1586,6 @@ static void setup_local_APIC(void)
{
int cpu = smp_processor_id();
unsigned int value;
-#ifdef CONFIG_X86_32
- int logical_apicid, ldr_apicid;
-#endif
if (disable_apic) {
disable_ioapic_support();
@@ -1626,16 +1623,21 @@ static void setup_local_APIC(void)
apic->init_apic_ldr();
#ifdef CONFIG_X86_32
- /*
- * APIC LDR is initialized. If logical_apicid mapping was
- * initialized during get_smp_config(), make sure it matches the
- * actual value.
- */
- logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
- ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
- WARN_ON(logical_apicid != BAD_APICID && logical_apicid != ldr_apicid);
- /* always use the value from LDR */
- early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
+ if (apic->dest_logical) {
+ int logical_apicid, ldr_apicid;
+
+ /*
+ * APIC LDR is initialized. If logical_apicid mapping was
+ * initialized during get_smp_config(), make sure it matches
+ * the actual value.
+ */
+ logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
+ ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
+ if (logical_apicid != BAD_APICID)
+ WARN_ON(logical_apicid != ldr_apicid);
+ /* Always use the value from LDR. */
+ early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
+ }
#endif
/*
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index d7a1e5a9331c..890f60083eca 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -30,7 +30,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
ifdef CONFIG_CPU_SUP_INTEL
-obj-y += intel.o intel_pconfig.o
+obj-y += intel.o intel_pconfig.o tsx.o
obj-$(CONFIG_PM) += intel_epb.o
endif
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 91c2561b905f..4c7b0fa15a19 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -39,6 +39,7 @@ static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
+static void __init taa_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
@@ -105,6 +106,7 @@ void __init check_bugs(void)
ssb_select_mitigation();
l1tf_select_mitigation();
mds_select_mitigation();
+ taa_select_mitigation();
arch_smt_update();
@@ -269,6 +271,100 @@ static int __init mds_cmdline(char *str)
early_param("mds", mds_cmdline);
#undef pr_fmt
+#define pr_fmt(fmt) "TAA: " fmt
+
+/* Default mitigation for TAA-affected CPUs */
+static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
+static bool taa_nosmt __ro_after_init;
+
+static const char * const taa_strings[] = {
+ [TAA_MITIGATION_OFF] = "Vulnerable",
+ [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
+ [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
+ [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
+};
+
+static void __init taa_select_mitigation(void)
+{
+ u64 ia32_cap;
+
+ if (!boot_cpu_has_bug(X86_BUG_TAA)) {
+ taa_mitigation = TAA_MITIGATION_OFF;
+ return;
+ }
+
+ /* TSX previously disabled by tsx=off */
+ if (!boot_cpu_has(X86_FEATURE_RTM)) {
+ taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
+ goto out;
+ }
+
+ if (cpu_mitigations_off()) {
+ taa_mitigation = TAA_MITIGATION_OFF;
+ return;
+ }
+
+ /* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */
+ if (taa_mitigation == TAA_MITIGATION_OFF)
+ goto out;
+
+ if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ taa_mitigation = TAA_MITIGATION_VERW;
+ else
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+ /*
+ * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
+ * A microcode update fixes this behavior to clear CPU buffers. It also
+ * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
+ * ARCH_CAP_TSX_CTRL_MSR bit.
+ *
+ * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+ * update is required.
+ */
+ ia32_cap = x86_read_arch_cap_msr();
+ if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+ !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+ /*
+ * TSX is enabled, select alternate mitigation for TAA which is
+ * the same as MDS. Enable MDS static branch to clear CPU buffers.
+ *
+ * For guests that can't determine whether the correct microcode is
+ * present on host, enable the mitigation for UCODE_NEEDED as well.
+ */
+ static_branch_enable(&mds_user_clear);
+
+ if (taa_nosmt || cpu_mitigations_auto_nosmt())
+ cpu_smt_disable(false);
+
+out:
+ pr_info("%s\n", taa_strings[taa_mitigation]);
+}
+
+static int __init tsx_async_abort_parse_cmdline(char *str)
+{
+ if (!boot_cpu_has_bug(X86_BUG_TAA))
+ return 0;
+
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "off")) {
+ taa_mitigation = TAA_MITIGATION_OFF;
+ } else if (!strcmp(str, "full")) {
+ taa_mitigation = TAA_MITIGATION_VERW;
+ } else if (!strcmp(str, "full,nosmt")) {
+ taa_mitigation = TAA_MITIGATION_VERW;
+ taa_nosmt = true;
+ }
+
+ return 0;
+}
+early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
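For reference while reading the state machine above: a minimal userspace sketch (not part of the patch) that reads back whichever taa_strings[] entry the kernel selected. The sysfs path is the vulnerabilities file this series adds, so this is hedged on running a kernel with these patches applied.

/* Print the TAA state the kernel reports via sysfs. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/tsx_async_abort", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Mitigation: Clear CPU buffers; SMT vulnerable" */
	fclose(f);
	return 0;
}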
+
+#undef pr_fmt
#define pr_fmt(fmt) "Spectre V1 : " fmt
enum spectre_v1_mitigation {
@@ -786,13 +882,10 @@ static void update_mds_branch_idle(void)
}
#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
+#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
void cpu_bugs_smt_update(void)
{
- /* Enhanced IBRS implies STIBP. No update required. */
- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
- return;
-
mutex_lock(&spec_ctrl_mutex);
switch (spectre_v2_user) {
@@ -819,6 +912,17 @@ void cpu_bugs_smt_update(void)
break;
}
+ switch (taa_mitigation) {
+ case TAA_MITIGATION_VERW:
+ case TAA_MITIGATION_UCODE_NEEDED:
+ if (sched_smt_active())
+ pr_warn_once(TAA_MSG_SMT);
+ break;
+ case TAA_MITIGATION_TSX_DISABLED:
+ case TAA_MITIGATION_OFF:
+ break;
+ }
+
mutex_unlock(&spec_ctrl_mutex);
}
@@ -1149,6 +1253,9 @@ void x86_spec_ctrl_setup_ap(void)
x86_amd_ssb_disable();
}
+bool itlb_multihit_kvm_mitigation;
+EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
+
#undef pr_fmt
#define pr_fmt(fmt) "L1TF: " fmt
@@ -1304,11 +1411,24 @@ static ssize_t l1tf_show_state(char *buf)
l1tf_vmx_states[l1tf_vmx_mitigation],
sched_smt_active() ? "vulnerable" : "disabled");
}
+
+static ssize_t itlb_multihit_show_state(char *buf)
+{
+ if (itlb_multihit_kvm_mitigation)
+ return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
+ else
+ return sprintf(buf, "KVM: Vulnerable\n");
+}
#else
static ssize_t l1tf_show_state(char *buf)
{
return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
+
+static ssize_t itlb_multihit_show_state(char *buf)
+{
+ return sprintf(buf, "Processor vulnerable\n");
+}
#endif
static ssize_t mds_show_state(char *buf)
@@ -1328,6 +1448,21 @@ static ssize_t mds_show_state(char *buf)
sched_smt_active() ? "vulnerable" : "disabled");
}
+static ssize_t tsx_async_abort_show_state(char *buf)
+{
+ if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
+ (taa_mitigation == TAA_MITIGATION_OFF))
+ return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
+
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+ return sprintf(buf, "%s; SMT Host state unknown\n",
+ taa_strings[taa_mitigation]);
+ }
+
+ return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
+ sched_smt_active() ? "vulnerable" : "disabled");
+}
+
static char *stibp_state(void)
{
if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
@@ -1398,6 +1533,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_MDS:
return mds_show_state(buf);
+ case X86_BUG_TAA:
+ return tsx_async_abort_show_state(buf);
+
+ case X86_BUG_ITLB_MULTIHIT:
+ return itlb_multihit_show_state(buf);
+
default:
break;
}
@@ -1434,4 +1575,14 @@ ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *bu
{
return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}
+
+ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
+}
+
+ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
+}
#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9ae7d1bcd4f4..fffe21945374 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1016,13 +1016,14 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#endif
}
-#define NO_SPECULATION BIT(0)
-#define NO_MELTDOWN BIT(1)
-#define NO_SSB BIT(2)
-#define NO_L1TF BIT(3)
-#define NO_MDS BIT(4)
-#define MSBDS_ONLY BIT(5)
-#define NO_SWAPGS BIT(6)
+#define NO_SPECULATION BIT(0)
+#define NO_MELTDOWN BIT(1)
+#define NO_SSB BIT(2)
+#define NO_L1TF BIT(3)
+#define NO_MDS BIT(4)
+#define MSBDS_ONLY BIT(5)
+#define NO_SWAPGS BIT(6)
+#define NO_ITLB_MULTIHIT BIT(7)
#define VULNWL(_vendor, _family, _model, _whitelist) \
{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -1043,27 +1044,27 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
/* Intel Family 6 */
- VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION),
- VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION),
- VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION),
- VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
- VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
-
- VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
- VULNWL_INTEL(ATOM_SILVERMONT_D, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
- VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
- VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
- VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
- VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
+
+ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SILVERMONT_D, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_INTEL(CORE_YONAH, NO_SSB),
- VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
- VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS),
- VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS),
- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
/*
* Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1073,15 +1074,17 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
* good enough for our purposes.
*/
+ VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT),
+
/* AMD Family 0xf - 0x12 */
- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
{}
};
@@ -1092,19 +1095,30 @@ static bool __init cpu_matches(unsigned long which)
return m && !!(m->driver_data & which);
}
-static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+u64 x86_read_arch_cap_msr(void)
{
u64 ia32_cap = 0;
+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ return ia32_cap;
+}
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+{
+ u64 ia32_cap = x86_read_arch_cap_msr();
+
+ /* Set the ITLB_MULTIHIT bug if the CPU is not in the whitelist and not mitigated */
+ if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+ setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
+
if (cpu_matches(NO_SPECULATION))
return;
setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
- if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
-
if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
@@ -1121,6 +1135,21 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
if (!cpu_matches(NO_SWAPGS))
setup_force_cpu_bug(X86_BUG_SWAPGS);
+ /*
+ * When the CPU is not mitigated for TAA (TAA_NO=0), set the TAA bug when:
+ * - TSX is supported, or
+ * - TSX_CTRL is present.
+ *
+ * The TSX_CTRL check is needed for cases where TSX could be disabled
+ * before the kernel boots, e.g. kexec.
+ * The TSX_CTRL check alone is not sufficient when the microcode update
+ * is not present, or when running as a guest that doesn't get TSX_CTRL.
+ */
+ if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+ (cpu_has(c, X86_FEATURE_RTM) ||
+ (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+ setup_force_cpu_bug(X86_BUG_TAA);
+
if (cpu_matches(NO_MELTDOWN))
return;
@@ -1554,6 +1583,8 @@ void __init identify_boot_cpu(void)
#endif
cpu_detect_tlb(&boot_cpu_data);
setup_cr_pinning();
+
+ tsx_init();
}
void identify_secondary_cpu(struct cpuinfo_x86 *c)
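To see the same capability bits the new x86_read_arch_cap_msr() helper consults, the MSR can be read from userspace through the msr driver. A hedged sketch, assuming the driver is loaded (run as root) and the ARCH_CAP_* bit positions used by this series (MDS_NO=5, PSCHANGE_MC_NO=6, TSX_CTRL=7, TAA_NO=8):

/* Dump the relevant MSR_IA32_ARCH_CAPABILITIES (0x10a) bits. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	uint64_t cap = 0;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &cap, sizeof(cap), 0x10a) != sizeof(cap)) {
		perror("MSR_IA32_ARCH_CAPABILITIES");
		return 1;
	}
	printf("MDS_NO=%d TSX_CTRL=%d TAA_NO=%d PSCHANGE_MC_NO=%d\n",
	       !!(cap & (1ULL << 5)), !!(cap & (1ULL << 7)),
	       !!(cap & (1ULL << 8)), !!(cap & (1ULL << 6)));
	close(fd);
	return 0;
}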
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index c0e2407abdd6..38ab6e115eac 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -44,6 +44,22 @@ struct _tlb_table {
extern const struct cpu_dev *const __x86_cpu_dev_start[],
*const __x86_cpu_dev_end[];
+#ifdef CONFIG_CPU_SUP_INTEL
+enum tsx_ctrl_states {
+ TSX_CTRL_ENABLE,
+ TSX_CTRL_DISABLE,
+ TSX_CTRL_NOT_SUPPORTED,
+};
+
+extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
+
+extern void __init tsx_init(void);
+extern void tsx_enable(void);
+extern void tsx_disable(void);
+#else
+static inline void tsx_init(void) { }
+#endif /* CONFIG_CPU_SUP_INTEL */
+
extern void get_cpu_cap(struct cpuinfo_x86 *c);
extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
@@ -62,4 +78,6 @@ unsigned int aperfmperf_get_khz(int cpu);
extern void x86_spec_ctrl_setup_ap(void);
+extern u64 x86_read_arch_cap_msr(void);
+
#endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index c2fdc00df163..11d5c5950e2d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -762,6 +762,11 @@ static void init_intel(struct cpuinfo_x86 *c)
detect_tme(c);
init_intel_misc_features(c);
+
+ if (tsx_ctrl_state == TSX_CTRL_ENABLE)
+ tsx_enable();
+ if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+ tsx_disable();
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index efbd54cc4e69..055c8613b531 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -522,6 +522,10 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
int ret = 0;
rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ ret = -ENOENT;
+ goto out;
+ }
md.priv = of->kn->priv;
resid = md.u.rid;
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index a46dee8e78db..2e3b06d6bbc6 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -461,10 +461,8 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
}
rdtgrp = rdtgroup_kn_lock_live(of->kn);
- rdt_last_cmd_clear();
if (!rdtgrp) {
ret = -ENOENT;
- rdt_last_cmd_puts("Directory was removed\n");
goto unlock;
}
@@ -2648,10 +2646,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
int ret;
prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
- rdt_last_cmd_clear();
if (!prdtgrp) {
ret = -ENODEV;
- rdt_last_cmd_puts("Directory was removed\n");
goto out_unlock;
}
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
new file mode 100644
index 000000000000..3e20d322bc98
--- /dev/null
+++ b/arch/x86/kernel/cpu/tsx.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Transactional Synchronization Extensions (TSX) control.
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author:
+ * Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+
+#include <asm/cmdline.h>
+
+#include "cpu.h"
+
+enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED;
+
+void tsx_disable(void)
+{
+ u64 tsx;
+
+ rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+ /* Force all transactions to immediately abort */
+ tsx |= TSX_CTRL_RTM_DISABLE;
+
+ /*
+ * Ensure TSX support is not enumerated in CPUID.
+ * This is visible to userspace and will ensure they
+ * do not waste resources trying TSX transactions that
+ * will always abort.
+ */
+ tsx |= TSX_CTRL_CPUID_CLEAR;
+
+ wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+}
+
+void tsx_enable(void)
+{
+ u64 tsx;
+
+ rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+ /* Enable the RTM feature in the cpu */
+ tsx &= ~TSX_CTRL_RTM_DISABLE;
+
+ /*
+ * Ensure TSX support is enumerated in CPUID.
+ * This is visible to userspace and will ensure they
+ * can enumerate and use the TSX feature.
+ */
+ tsx &= ~TSX_CTRL_CPUID_CLEAR;
+
+ wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+}
+
+static bool __init tsx_ctrl_is_supported(void)
+{
+ u64 ia32_cap = x86_read_arch_cap_msr();
+
+ /*
+ * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
+ * MSR is enumerated by the ARCH_CAP_TSX_CTRL_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
+ *
+ * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
+ * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
+ * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
+ * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
+ * tsx= cmdline requests will do nothing on CPUs without
+ * MSR_IA32_TSX_CTRL support.
+ */
+ return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
+}
+
+static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
+{
+ if (boot_cpu_has_bug(X86_BUG_TAA))
+ return TSX_CTRL_DISABLE;
+
+ return TSX_CTRL_ENABLE;
+}
+
+void __init tsx_init(void)
+{
+ char arg[5] = {};
+ int ret;
+
+ if (!tsx_ctrl_is_supported())
+ return;
+
+ ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
+ if (ret >= 0) {
+ if (!strcmp(arg, "on")) {
+ tsx_ctrl_state = TSX_CTRL_ENABLE;
+ } else if (!strcmp(arg, "off")) {
+ tsx_ctrl_state = TSX_CTRL_DISABLE;
+ } else if (!strcmp(arg, "auto")) {
+ tsx_ctrl_state = x86_get_tsx_auto_mode();
+ } else {
+ tsx_ctrl_state = TSX_CTRL_DISABLE;
+ pr_err("tsx: invalid option, defaulting to off\n");
+ }
+ } else {
+ /* tsx= not provided */
+ if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_AUTO))
+ tsx_ctrl_state = x86_get_tsx_auto_mode();
+ else if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_OFF))
+ tsx_ctrl_state = TSX_CTRL_DISABLE;
+ else
+ tsx_ctrl_state = TSX_CTRL_ENABLE;
+ }
+
+ if (tsx_ctrl_state == TSX_CTRL_DISABLE) {
+ tsx_disable();
+
+ /*
+ * tsx_disable() will change the state of the
+ * RTM CPUID bit. Clear it here since it is now
+ * expected to be not set.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_RTM);
+ } else if (tsx_ctrl_state == TSX_CTRL_ENABLE) {
+
+ /*
+ * HW defaults TSX to be enabled at bootup.
+ * We may still need the TSX enable support
+ * during init for special cases like
+ * kexec after TSX is disabled.
+ */
+ tsx_enable();
+
+ /*
+ * tsx_enable() will change the state of the
+ * RTM CPUID bit. Force it here since it is now
+ * expected to be set.
+ */
+ setup_force_cpu_cap(X86_FEATURE_RTM);
+ }
+}
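The two MSR writers above only toggle a pair of bits. A standalone sketch of that bit handling as a pure function, with the RTM_DISABLE (bit 0) and CPUID_CLEAR (bit 1) positions taken from this series' definitions:

/* Model the MSR_IA32_TSX_CTRL updates done by tsx_enable()/tsx_disable(). */
#include <stdint.h>
#include <stdio.h>

#define TSX_CTRL_RTM_DISABLE	(1ULL << 0)
#define TSX_CTRL_CPUID_CLEAR	(1ULL << 1)

static uint64_t tsx_ctrl_apply(uint64_t msr, int enable)
{
	if (enable)	/* clear both bits: RTM works and is enumerated */
		return msr & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR);
	/* set both bits: transactions abort and CPUID hides RTM */
	return msr | TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR;
}

int main(void)
{
	printf("disable:   %#llx\n", (unsigned long long)tsx_ctrl_apply(0, 0));
	printf("re-enable: %#llx\n", (unsigned long long)tsx_ctrl_apply(3, 1));
	return 0;
}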
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 753b8cfe8b8a..87b97897a881 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -94,6 +94,13 @@ static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
+ /*
+ * Handle the case where a stack trace is collected _before_
+ * cea_exception_stacks has been initialized.
+ */
+ if (!begin)
+ return false;
+
end = begin + sizeof(struct cea_exception_stacks);
/* Bail if @stack is outside the exception stack area. */
if (stk < begin || stk >= end)
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 6f6b1d04dadf..4cba91ec8049 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -710,6 +710,8 @@ static struct chipset early_qrk[] __initdata = {
*/
{ PCI_VENDOR_ID_INTEL, 0x0f00,
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+ { PCI_VENDOR_ID_INTEL, 0x3ec4,
+ PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
{ PCI_VENDOR_ID_BROADCOM, 0x4331,
PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
{}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c59454c382fd..7e322e2daaf5 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1505,6 +1505,9 @@ void __init tsc_init(void)
return;
}
+ if (tsc_clocksource_reliable || no_tsc_watchdog)
+ clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+
clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
detect_art();
}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 24c23c66b226..2ce9da58611e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -37,6 +37,7 @@
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/kern_levels.h>
+#include <linux/kthread.h>
#include <asm/page.h>
#include <asm/pat.h>
@@ -47,6 +48,35 @@
#include <asm/kvm_page_track.h>
#include "trace.h"
+extern bool itlb_multihit_kvm_mitigation;
+
+static int __read_mostly nx_huge_pages = -1;
+#ifdef CONFIG_PREEMPT_RT
+/* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
+static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
+#else
+static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
+#endif
+
+static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
+static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
+
+static struct kernel_param_ops nx_huge_pages_ops = {
+ .set = set_nx_huge_pages,
+ .get = param_get_bool,
+};
+
+static struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
+ .set = set_nx_huge_pages_recovery_ratio,
+ .get = param_get_uint,
+};
+
+module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
+__MODULE_PARM_TYPE(nx_huge_pages, "bool");
+module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
+ &nx_huge_pages_recovery_ratio, 0644);
+__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
+
/*
* When setting this variable to true it enables Two-Dimensional-Paging
* where the hardware walks 2 page tables:
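Since module_param_cb() registers nx_huge_pages with mode 0644, it can be flipped at runtime. A hedged userspace sketch, assuming the conventional /sys/module/kvm/parameters/ location and the "off"/"force"/"auto" strings accepted by set_nx_huge_pages() below; run as root:

/* Switch the KVM NX huge pages workaround to auto mode. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/module/kvm/parameters/nx_huge_pages", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("auto\n", f);	/* deploy the workaround only on affected CPUs */
	fclose(f);
	return 0;
}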
@@ -352,6 +382,11 @@ static inline bool spte_ad_need_write_protect(u64 spte)
return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
}
+static bool is_nx_huge_page_enabled(void)
+{
+ return READ_ONCE(nx_huge_pages);
+}
+
static inline u64 spte_shadow_accessed_mask(u64 spte)
{
MMU_WARN_ON(is_mmio_spte(spte));
@@ -1190,6 +1225,17 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm_mmu_gfn_disallow_lpage(slot, gfn);
}
+static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ if (sp->lpage_disallowed)
+ return;
+
+ ++kvm->stat.nx_lpage_splits;
+ list_add_tail(&sp->lpage_disallowed_link,
+ &kvm->arch.lpage_disallowed_mmu_pages);
+ sp->lpage_disallowed = true;
+}
+
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
struct kvm_memslots *slots;
@@ -1207,6 +1253,13 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm_mmu_gfn_allow_lpage(slot, gfn);
}
+static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ --kvm->stat.nx_lpage_splits;
+ sp->lpage_disallowed = false;
+ list_del(&sp->lpage_disallowed_link);
+}
+
static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
struct kvm_memory_slot *slot)
{
@@ -2792,6 +2845,9 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
kvm_reload_remote_mmus(kvm);
}
+ if (sp->lpage_disallowed)
+ unaccount_huge_nx_page(kvm, sp);
+
sp->role.invalid = 1;
return list_unstable;
}
@@ -3013,6 +3069,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (!speculative)
spte |= spte_shadow_accessed_mask(spte);
+ if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) &&
+ is_nx_huge_page_enabled()) {
+ pte_access &= ~ACC_EXEC_MASK;
+ }
+
if (pte_access & ACC_EXEC_MASK)
spte |= shadow_x_mask;
else
@@ -3233,9 +3294,32 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
__direct_pte_prefetch(vcpu, sp, sptep);
}
+static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
+ gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
+{
+ int level = *levelp;
+ u64 spte = *it.sptep;
+
+ if (it.level == level && level > PT_PAGE_TABLE_LEVEL &&
+ is_nx_huge_page_enabled() &&
+ is_shadow_present_pte(spte) &&
+ !is_large_pte(spte)) {
+ /*
+ * A small SPTE exists for this pfn, but FNAME(fetch)
+ * and __direct_map would like to create a large PTE
+ * instead: just force them to go down another level,
+ * copying the next 9 bits of the address back into
+ * pfn for them.
+ */
+ u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1);
+ *pfnp |= gfn & page_mask;
+ (*levelp)--;
+ }
+}
+
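The page_mask computation above is easiest to see with concrete numbers. A standalone sketch with hypothetical gfn/pfn values, using the fact that KVM_PAGES_PER_HPAGE(l) is 1 << (9 * (l - 1)):

/* Show how the next 9 address bits are merged into pfn when the
 * mapping is forced down from a 2M (level 2) to a 4K (level 1) page. */
#include <stdio.h>
#include <stdint.h>

#define PAGES_PER_HPAGE(l)	(1ULL << (9 * ((l) - 1)))

int main(void)
{
	int level = 2;
	uint64_t gfn = 0x12345, pfn = 0xabc00;	/* hypothetical values */
	uint64_t page_mask = PAGES_PER_HPAGE(level) - PAGES_PER_HPAGE(level - 1);

	pfn |= gfn & page_mask;		/* page_mask == 0x1ff at level 2 */
	printf("pfn after adjust: %#llx\n", (unsigned long long)pfn);
	return 0;
}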
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
int map_writable, int level, kvm_pfn_t pfn,
- bool prefault)
+ bool prefault, bool lpage_disallowed)
{
struct kvm_shadow_walk_iterator it;
struct kvm_mmu_page *sp;
@@ -3248,6 +3332,12 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
trace_kvm_mmu_spte_requested(gpa, level, pfn);
for_each_shadow_entry(vcpu, gpa, it) {
+ /*
+ * We cannot overwrite existing page tables with an NX
+ * large page, as the leaf could be executable.
+ */
+ disallowed_hugepage_adjust(it, gfn, &pfn, &level);
+
base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
if (it.level == level)
break;
@@ -3258,6 +3348,8 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
it.level - 1, true, ACC_ALL);
link_shadow_page(vcpu, it.sptep, sp);
+ if (lpage_disallowed)
+ account_huge_nx_page(vcpu->kvm, sp);
}
}
@@ -3306,7 +3398,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
* here.
*/
if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
- level == PT_PAGE_TABLE_LEVEL &&
+ !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
PageTransCompoundMap(pfn_to_page(pfn)) &&
!mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
unsigned long mask;
@@ -3550,11 +3642,14 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
{
int r;
int level;
- bool force_pt_level = false;
+ bool force_pt_level;
kvm_pfn_t pfn;
unsigned long mmu_seq;
bool map_writable, write = error_code & PFERR_WRITE_MASK;
+ bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
+ is_nx_huge_page_enabled();
+ force_pt_level = lpage_disallowed;
level = mapping_level(vcpu, gfn, &force_pt_level);
if (likely(!force_pt_level)) {
/*
@@ -3588,7 +3683,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
goto out_unlock;
if (likely(!force_pt_level))
transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
- r = __direct_map(vcpu, v, write, map_writable, level, pfn, prefault);
+ r = __direct_map(vcpu, v, write, map_writable, level, pfn,
+ prefault, false);
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
@@ -4174,6 +4270,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
unsigned long mmu_seq;
int write = error_code & PFERR_WRITE_MASK;
bool map_writable;
+ bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
+ is_nx_huge_page_enabled();
MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
@@ -4184,8 +4282,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
if (r)
return r;
- force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn,
- PT_DIRECTORY_LEVEL);
+ force_pt_level =
+ lpage_disallowed ||
+ !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL);
level = mapping_level(vcpu, gfn, &force_pt_level);
if (likely(!force_pt_level)) {
if (level > PT_DIRECTORY_LEVEL &&
@@ -4214,7 +4313,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
goto out_unlock;
if (likely(!force_pt_level))
transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
- r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, prefault);
+ r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
+ prefault, lpage_disallowed);
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
@@ -5914,9 +6014,9 @@ restart:
* the guest, and the guest page table is using 4K page size
* mapping if the indirect sp has level = 1.
*/
- if (sp->role.direct &&
- !kvm_is_reserved_pfn(pfn) &&
- PageTransCompoundMap(pfn_to_page(pfn))) {
+ if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
+ !kvm_is_zone_device_pfn(pfn) &&
+ PageTransCompoundMap(pfn_to_page(pfn))) {
pte_list_remove(rmap_head, sptep);
if (kvm_available_flush_tlb_with_range())
@@ -6155,10 +6255,59 @@ static void kvm_set_mmio_spte_mask(void)
kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}
+static bool get_nx_auto_mode(void)
+{
+ /* Return true when CPU has the bug, and mitigations are ON */
+ return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
+}
+
+static void __set_nx_huge_pages(bool val)
+{
+ nx_huge_pages = itlb_multihit_kvm_mitigation = val;
+}
+
+static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
+{
+ bool old_val = nx_huge_pages;
+ bool new_val;
+
+ /* In "auto" mode deploy workaround only if CPU has the bug. */
+ if (sysfs_streq(val, "off"))
+ new_val = 0;
+ else if (sysfs_streq(val, "force"))
+ new_val = 1;
+ else if (sysfs_streq(val, "auto"))
+ new_val = get_nx_auto_mode();
+ else if (strtobool(val, &new_val) < 0)
+ return -EINVAL;
+
+ __set_nx_huge_pages(new_val);
+
+ if (new_val != old_val) {
+ struct kvm *kvm;
+
+ mutex_lock(&kvm_lock);
+
+ list_for_each_entry(kvm, &vm_list, vm_list) {
+ mutex_lock(&kvm->slots_lock);
+ kvm_mmu_zap_all_fast(kvm);
+ mutex_unlock(&kvm->slots_lock);
+
+ wake_up_process(kvm->arch.nx_lpage_recovery_thread);
+ }
+ mutex_unlock(&kvm_lock);
+ }
+
+ return 0;
+}
+
int kvm_mmu_module_init(void)
{
int ret = -ENOMEM;
+ if (nx_huge_pages == -1)
+ __set_nx_huge_pages(get_nx_auto_mode());
+
/*
* MMU roles use union aliasing which is, generally speaking, an
* undefined behavior. However, we supposedly know how compilers behave
@@ -6238,3 +6387,116 @@ void kvm_mmu_module_exit(void)
unregister_shrinker(&mmu_shrinker);
mmu_audit_disable();
}
+
+static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
+{
+ unsigned int old_val;
+ int err;
+
+ old_val = nx_huge_pages_recovery_ratio;
+ err = param_set_uint(val, kp);
+ if (err)
+ return err;
+
+ if (READ_ONCE(nx_huge_pages) &&
+ !old_val && nx_huge_pages_recovery_ratio) {
+ struct kvm *kvm;
+
+ mutex_lock(&kvm_lock);
+
+ list_for_each_entry(kvm, &vm_list, vm_list)
+ wake_up_process(kvm->arch.nx_lpage_recovery_thread);
+
+ mutex_unlock(&kvm_lock);
+ }
+
+ return err;
+}
+
+static void kvm_recover_nx_lpages(struct kvm *kvm)
+{
+ int rcu_idx;
+ struct kvm_mmu_page *sp;
+ unsigned int ratio;
+ LIST_HEAD(invalid_list);
+ ulong to_zap;
+
+ rcu_idx = srcu_read_lock(&kvm->srcu);
+ spin_lock(&kvm->mmu_lock);
+
+ ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
+ to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
+ while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) {
+ /*
+ * We use a separate list instead of just using active_mmu_pages
+ * because the number of lpage_disallowed pages is expected to
+ * be relatively small compared to the total.
+ */
+ sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
+ struct kvm_mmu_page,
+ lpage_disallowed_link);
+ WARN_ON_ONCE(!sp->lpage_disallowed);
+ kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+ WARN_ON_ONCE(sp->lpage_disallowed);
+
+ if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ kvm_mmu_commit_zap_page(kvm, &invalid_list);
+ if (to_zap)
+ cond_resched_lock(&kvm->mmu_lock);
+ }
+ }
+
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, rcu_idx);
+}
+
+static long get_nx_lpage_recovery_timeout(u64 start_time)
+{
+ return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
+ ? start_time + 60 * HZ - get_jiffies_64()
+ : MAX_SCHEDULE_TIMEOUT;
+}
+
+static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
+{
+ u64 start_time;
+ long remaining_time;
+
+ while (true) {
+ start_time = get_jiffies_64();
+ remaining_time = get_nx_lpage_recovery_timeout(start_time);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop() && remaining_time > 0) {
+ schedule_timeout(remaining_time);
+ remaining_time = get_nx_lpage_recovery_timeout(start_time);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+ set_current_state(TASK_RUNNING);
+
+ if (kthread_should_stop())
+ return 0;
+
+ kvm_recover_nx_lpages(kvm);
+ }
+}
+
+int kvm_mmu_post_init_vm(struct kvm *kvm)
+{
+ int err;
+
+ err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
+ "kvm-nx-lpage-recovery",
+ &kvm->arch.nx_lpage_recovery_thread);
+ if (!err)
+ kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
+
+ return err;
+}
+
+void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
+{
+ if (kvm->arch.nx_lpage_recovery_thread)
+ kthread_stop(kvm->arch.nx_lpage_recovery_thread);
+}
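The recovery worker above wakes every 60 seconds and zaps DIV_ROUND_UP(splits, ratio) pages per period, so the default ratio of 60 recovers roughly 1/60 of the accumulated split pages each minute. A tiny sketch of that sizing with hypothetical counts:

/* Model the to_zap computation in kvm_recover_nx_lpages(). */
#include <stdio.h>

int main(void)
{
	unsigned long splits = 1000, ratio = 60;	/* hypothetical */
	unsigned long to_zap = ratio ? (splits + ratio - 1) / ratio : 0;

	printf("to_zap per period: %lu\n", to_zap);	/* 17 for 1000/60 */
	return 0;
}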
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 11f8ec89433b..d55674f44a18 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -210,4 +210,8 @@ void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn);
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
+
+int kvm_mmu_post_init_vm(struct kvm *kvm);
+void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
+
#endif
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 7d5cdb3af594..97b21e7fd013 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -614,13 +614,14 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
struct guest_walker *gw,
int write_fault, int hlevel,
- kvm_pfn_t pfn, bool map_writable, bool prefault)
+ kvm_pfn_t pfn, bool map_writable, bool prefault,
+ bool lpage_disallowed)
{
struct kvm_mmu_page *sp = NULL;
struct kvm_shadow_walk_iterator it;
unsigned direct_access, access = gw->pt_access;
int top_level, ret;
- gfn_t base_gfn;
+ gfn_t gfn, base_gfn;
direct_access = gw->pte_access;
@@ -665,13 +666,25 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
link_shadow_page(vcpu, it.sptep, sp);
}
- base_gfn = gw->gfn;
+ /*
+ * FNAME(page_fault) might have clobbered the bottom bits of
+ * gw->gfn, restore them from the virtual address.
+ */
+ gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT);
+ base_gfn = gfn;
trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
clear_sp_write_flooding_count(it.sptep);
- base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+
+ /*
+ * We cannot overwrite existing page tables with an NX
+ * large page, as the leaf could be executable.
+ */
+ disallowed_hugepage_adjust(it, gfn, &pfn, &hlevel);
+
+ base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
if (it.level == hlevel)
break;
@@ -683,6 +696,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
sp = kvm_mmu_get_page(vcpu, base_gfn, addr,
it.level - 1, true, direct_access);
link_shadow_page(vcpu, it.sptep, sp);
+ if (lpage_disallowed)
+ account_huge_nx_page(vcpu->kvm, sp);
}
}
@@ -759,9 +774,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
int r;
kvm_pfn_t pfn;
int level = PT_PAGE_TABLE_LEVEL;
- bool force_pt_level = false;
unsigned long mmu_seq;
bool map_writable, is_self_change_mapping;
+ bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
+ is_nx_huge_page_enabled();
+ bool force_pt_level = lpage_disallowed;
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
@@ -851,7 +868,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
if (!force_pt_level)
transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level);
r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
- level, pfn, map_writable, prefault);
+ level, pfn, map_writable, prefault, lpage_disallowed);
kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
out_unlock:
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5d21a4ab28cf..04a8212704c1 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1268,6 +1268,18 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
return;
+ /*
+ * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
+ * PI.NDST: pi_post_block is the one expected to change PID.NDST and the
+ * wakeup handler expects the vCPU to be on the blocked_vcpu_list that
+ * matches PI.NDST. Otherwise, a vCPU may not be woken up
+ * correctly.
+ */
+ if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || vcpu->cpu == cpu) {
+ pi_clear_sn(pi_desc);
+ goto after_clear_sn;
+ }
+
/* The full case. */
do {
old.control = new.control = pi_desc->control;
@@ -1283,6 +1295,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
} while (cmpxchg64(&pi_desc->control, old.control,
new.control) != old.control);
+after_clear_sn:
+
/*
* Clear SN before reading the bitmap. The VT-d firmware
* writes the bitmap and reads SN atomically (5.2.3 in the
@@ -1291,7 +1305,7 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
*/
smp_mb__after_atomic();
- if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS))
+ if (!pi_is_pir_empty(pi_desc))
pi_set_on(pi_desc);
}
@@ -6137,7 +6151,7 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
if (pi_test_on(&vmx->pi_desc)) {
pi_clear_on(&vmx->pi_desc);
/*
- * IOMMU can write to PIR.ON, so the barrier matters even on UP.
+ * IOMMU can write to PID.ON, so the barrier matters even on UP.
* But on x86 this is just a compiler barrier anyway.
*/
smp_mb__after_atomic();
@@ -6167,7 +6181,10 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
- return pi_test_on(vcpu_to_pi_desc(vcpu));
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+
+ return pi_test_on(pi_desc) ||
+ (pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc));
}
static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index bee16687dc0b..5a0f34b1e226 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -355,6 +355,11 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}
+static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
+{
+ return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
+}
+
static inline void pi_set_sn(struct pi_desc *pi_desc)
{
set_bit(POSTED_INTR_SN,
@@ -373,6 +378,12 @@ static inline void pi_clear_on(struct pi_desc *pi_desc)
(unsigned long *)&pi_desc->control);
}
+static inline void pi_clear_sn(struct pi_desc *pi_desc)
+{
+ clear_bit(POSTED_INTR_SN,
+ (unsigned long *)&pi_desc->control);
+}
+
static inline int pi_test_on(struct pi_desc *pi_desc)
{
return test_bit(POSTED_INTR_ON,
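pi_is_pir_empty() is just a bitmap scan over the 256 posted-interrupt request bits. A standalone sketch of the check, using plain 64-bit words in place of the kernel's bitmap helpers:

/* Return 1 when no vector is pending in a 256-bit PIR. */
#include <stdio.h>
#include <stdint.h>

#define NR_VECTORS 256

static int pir_empty(const uint64_t pir[NR_VECTORS / 64])
{
	int i;

	for (i = 0; i < NR_VECTORS / 64; i++)
		if (pir[i])
			return 0;
	return 1;
}

int main(void)
{
	uint64_t pir[4] = { 0, 0, 1ULL << 32, 0 };	/* vector 160 pending */

	printf("empty=%d\n", pir_empty(pir));
	return 0;
}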
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ff395f812719..5d530521f11d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -213,6 +213,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "mmu_unsync", VM_STAT(mmu_unsync) },
{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
{ "largepages", VM_STAT(lpages, .mode = 0444) },
+ { "nx_largepages_splitted", VM_STAT(nx_lpage_splits, .mode = 0444) },
{ "max_mmu_page_hash_collisions",
VM_STAT(max_mmu_page_hash_collisions) },
{ NULL }
@@ -1132,13 +1133,15 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
* List of msr numbers which we expose to userspace through KVM_GET_MSRS
* and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
*
- * This list is modified at module load time to reflect the
+ * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features)
+ * are filled from the corresponding const "all" lists.
+ * msrs_to_save is selected from msrs_to_save_all to reflect the
* capabilities of the host cpu. This capabilities test skips MSRs that are
- * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
+ * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
* may depend on host virtualization features rather than host cpu features.
*/
-static u32 msrs_to_save[] = {
+static const u32 msrs_to_save_all[] = {
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
MSR_STAR,
#ifdef CONFIG_X86_64
@@ -1179,9 +1182,10 @@ static u32 msrs_to_save[] = {
MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
};
+static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
static unsigned num_msrs_to_save;
-static u32 emulated_msrs[] = {
+static const u32 emulated_msrs_all[] = {
MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
@@ -1220,7 +1224,7 @@ static u32 emulated_msrs[] = {
* by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs.
* We always support the "true" VMX control MSRs, even if the host
* processor does not, so I am putting these registers here rather
- * than in msrs_to_save.
+ * than in msrs_to_save_all.
*/
MSR_IA32_VMX_BASIC,
MSR_IA32_VMX_TRUE_PINBASED_CTLS,
@@ -1239,13 +1243,14 @@ static u32 emulated_msrs[] = {
MSR_KVM_POLL_CONTROL,
};
+static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
static unsigned num_emulated_msrs;
/*
* List of msr numbers which are used to expose MSR-based features that
* can be used by a hypervisor to validate requested CPU features.
*/
-static u32 msr_based_features[] = {
+static const u32 msr_based_features_all[] = {
MSR_IA32_VMX_BASIC,
MSR_IA32_VMX_TRUE_PINBASED_CTLS,
MSR_IA32_VMX_PINBASED_CTLS,
@@ -1270,6 +1275,7 @@ static u32 msr_based_features[] = {
MSR_IA32_ARCH_CAPABILITIES,
};
+static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)];
static unsigned int num_msr_based_features;
static u64 kvm_get_arch_capabilities(void)
@@ -1280,6 +1286,14 @@ static u64 kvm_get_arch_capabilities(void)
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
/*
+ * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
+ * the nested hypervisor runs with NX huge pages. If it is not,
+ * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
+ * L1 guests, so it need not worry about its own (L2) guests.
+ */
+ data |= ARCH_CAP_PSCHANGE_MC_NO;
+
+ /*
* If we're doing cache flushes (either "always" or "cond")
* we will do one whenever the guest does a vmlaunch/vmresume.
* If an outer hypervisor is doing the cache flush for us
@@ -1298,6 +1312,25 @@ static u64 kvm_get_arch_capabilities(void)
if (!boot_cpu_has_bug(X86_BUG_MDS))
data |= ARCH_CAP_MDS_NO;
+ /*
+ * On TAA-affected systems, export MDS_NO=0 when:
+ * - TSX is enabled on the host, i.e. X86_FEATURE_RTM=1.
+ * - Updated microcode is present. This is detected by
+ * the presence of ARCH_CAP_TSX_CTRL_MSR and ensures
+ * that VERW clears CPU buffers.
+ *
+ * When MDS_NO=0 is exported, guests deploy the clear CPU buffers
+ * mitigation and don't complain:
+ *
+ * "Vulnerable: Clear CPU buffers attempted, no microcode"
+ *
+ * If TSX is disabled on the system, guests are also mitigated against
+ * TAA, and the clear CPU buffers mitigation is not required for guests.
+ */
+ if (boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM) &&
+ (data & ARCH_CAP_TSX_CTRL_MSR))
+ data &= ~ARCH_CAP_MDS_NO;
+
return data;
}
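A standalone sketch of the capability fixup performed by kvm_get_arch_capabilities() above, with the host state reduced to booleans and the ARCH_CAP_* bit positions assumed as elsewhere in this series:

/* Model the guest-visible ARCH_CAPABILITIES adjustments. */
#include <stdint.h>
#include <stdio.h>

#define CAP_MDS_NO		(1ULL << 5)
#define CAP_PSCHANGE_MC_NO	(1ULL << 6)
#define CAP_TSX_CTRL		(1ULL << 7)

static uint64_t guest_arch_caps(uint64_t host, int taa_bug, int rtm)
{
	uint64_t data = host | CAP_PSCHANGE_MC_NO;	/* NX huge pages cover L2 */

	if (taa_bug && rtm && (data & CAP_TSX_CTRL))
		data &= ~CAP_MDS_NO;	/* make guests deploy VERW */
	return data;
}

int main(void)
{
	uint64_t host = CAP_MDS_NO | CAP_TSX_CTRL;	/* hypothetical host */

	printf("%#llx\n", (unsigned long long)guest_arch_caps(host, 1, 1));
	return 0;
}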
@@ -5090,22 +5123,26 @@ static void kvm_init_msr_list(void)
{
struct x86_pmu_capability x86_pmu;
u32 dummy[2];
- unsigned i, j;
+ unsigned i;
BUILD_BUG_ON_MSG(INTEL_PMC_MAX_FIXED != 4,
- "Please update the fixed PMCs in msrs_to_save[]");
+ "Please update the fixed PMCs in msrs_to_saved_all[]");
perf_get_x86_pmu_capability(&x86_pmu);
- for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
- if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
+ num_msrs_to_save = 0;
+ num_emulated_msrs = 0;
+ num_msr_based_features = 0;
+
+ for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
+ if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
continue;
/*
* Even MSRs that are valid in the host may not be exposed
* to the guests in some cases.
*/
- switch (msrs_to_save[i]) {
+ switch (msrs_to_save_all[i]) {
case MSR_IA32_BNDCFGS:
if (!kvm_mpx_supported())
continue;
@@ -5133,17 +5170,17 @@ static void kvm_init_msr_list(void)
break;
case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: {
if (!kvm_x86_ops->pt_supported() ||
- msrs_to_save[i] - MSR_IA32_RTIT_ADDR0_A >=
+ msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >=
intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
continue;
break;
case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
- if (msrs_to_save[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
+ if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
continue;
break;
case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
- if (msrs_to_save[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
+ if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
continue;
}
@@ -5151,34 +5188,25 @@ static void kvm_init_msr_list(void)
break;
}
- if (j < i)
- msrs_to_save[j] = msrs_to_save[i];
- j++;
+ msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i];
}
- num_msrs_to_save = j;
- for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
- if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+ for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
+ if (!kvm_x86_ops->has_emulated_msr(emulated_msrs_all[i]))
continue;
- if (j < i)
- emulated_msrs[j] = emulated_msrs[i];
- j++;
+ emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
}
- num_emulated_msrs = j;
- for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) {
+ for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) {
struct kvm_msr_entry msr;
- msr.index = msr_based_features[i];
+ msr.index = msr_based_features_all[i];
if (kvm_get_msr_feature(&msr))
continue;
- if (j < i)
- msr_based_features[j] = msr_based_features[i];
- j++;
+ msr_based_features[num_msr_based_features++] = msr_based_features_all[i];
}
- num_msr_based_features = j;
}
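The rewrite above replaces in-place compaction (the old two-index i/j scheme) with filtering from const "all" tables into counted arrays, which lets kvm_init_msr_list() be re-run safely. A minimal standalone sketch of that pattern, with hypothetical MSR numbers and a hypothetical filter:

/* Filter a const table into a mutable array with an explicit count. */
#include <stdio.h>

static const unsigned int msrs_all[] = { 0x10, 0x1b, 0xc0000080 };
static unsigned int msrs[sizeof(msrs_all) / sizeof(msrs_all[0])];
static unsigned int num_msrs;

static int msr_is_supported(unsigned int msr)
{
	return msr != 0x1b;	/* hypothetical filter */
}

int main(void)
{
	unsigned int i;

	num_msrs = 0;
	for (i = 0; i < sizeof(msrs_all) / sizeof(msrs_all[0]); i++) {
		if (!msr_is_supported(msrs_all[i]))
			continue;
		msrs[num_msrs++] = msrs_all[i];
	}
	printf("%u MSRs kept\n", num_msrs);
	return 0;
}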
static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
@@ -9428,6 +9456,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
+ INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
atomic_set(&kvm->arch.noncoherent_dma_count, 0);
@@ -9456,6 +9485,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return kvm_x86_ops->vm_init(kvm);
}
+int kvm_arch_post_init_vm(struct kvm *kvm)
+{
+ return kvm_mmu_post_init_vm(kvm);
+}
+
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
vcpu_load(vcpu);
@@ -9557,6 +9591,11 @@ int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
}
EXPORT_SYMBOL_GPL(x86_set_memory_region);
+void kvm_arch_pre_destroy_vm(struct kvm *kvm)
+{
+ kvm_mmu_pre_destroy_vm(kvm);
+}
+
void kvm_arch_destroy_vm(struct kvm *kvm)
{
if (current->mm == kvm->mm) {
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 0319d6339822..0c6214497fcc 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2713,6 +2713,28 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
}
}
+
+static
+void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+{
+ /*
+ * To prevent bfqq's service guarantees from being violated,
+ * bfqq may be left busy, i.e., queued for service, even if
+ * empty (see comments in __bfq_bfqq_expire() for
+ * details). But, if no process will send requests to bfqq any
+ * longer, then there is no point in keeping bfqq queued for
+ * service. In addition, keeping bfqq queued for service, but
+ * with no process ref any longer, may have caused bfqq to be
+ * freed when dequeued from service. But this is assumed to
+ * never happen.
+ */
+ if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
+ bfqq != bfqd->in_service_queue)
+ bfq_del_bfqq_busy(bfqd, bfqq, false);
+
+ bfq_put_queue(bfqq);
+}
+
static void
bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
@@ -2783,8 +2805,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
*/
new_bfqq->pid = -1;
bfqq->bic = NULL;
- /* release process reference to bfqq */
- bfq_put_queue(bfqq);
+ bfq_release_process_ref(bfqd, bfqq);
}
static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
@@ -4899,7 +4920,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_put_cooperator(bfqq);
- bfq_put_queue(bfqq); /* release process reference */
+ bfq_release_process_ref(bfqd, bfqq);
}
static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
@@ -5001,8 +5022,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
bfqq = bic_to_bfqq(bic, false);
if (bfqq) {
- /* release process reference on this queue */
- bfq_put_queue(bfqq);
+ bfq_release_process_ref(bfqd, bfqq);
bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
bic_set_bfqq(bic, bfqq, false);
}
@@ -5963,7 +5983,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
bfq_put_cooperator(bfqq);
- bfq_put_queue(bfqq);
+ bfq_release_process_ref(bfqq->bfqd, bfqq);
return NULL;
}
diff --git a/block/bio.c b/block/bio.c
index 8f0ed6228fc5..b1170ec18464 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -751,7 +751,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
return false;
- if (bio->bi_vcnt > 0) {
+ if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
if (page_is_mergeable(bv, page, len, off, same_page)) {
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5d21027b1faf..1eb8895be4c6 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -934,9 +934,14 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
int i;
bool has_stats = false;
+ spin_lock_irq(&blkg->q->queue_lock);
+
+ if (!blkg->online)
+ goto skip;
+
dname = blkg_dev_name(blkg);
if (!dname)
- continue;
+ goto skip;
/*
* Hooray string manipulation, count is the size written NOT
@@ -946,8 +951,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
*/
off += scnprintf(buf+off, size-off, "%s ", dname);
- spin_lock_irq(&blkg->q->queue_lock);
-
blkg_rwstat_recursive_sum(blkg, NULL,
offsetof(struct blkcg_gq, stat_bytes), &rwstat);
rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
@@ -960,8 +963,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];
- spin_unlock_irq(&blkg->q->queue_lock);
-
if (rbytes || wbytes || rios || wios) {
has_stats = true;
off += scnprintf(buf+off, size-off,
@@ -999,6 +1000,8 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
seq_commit(sf, -1);
}
}
+ skip:
+ spin_unlock_irq(&blkg->q->queue_lock);
}
rcu_read_unlock();
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index a7ed434eae03..e01267f99183 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1057,9 +1057,12 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
atomic64_set(&iocg->active_period, cur_period);
/* already activated or breaking leaf-only constraint? */
- for (i = iocg->level; i > 0; i--)
- if (!list_empty(&iocg->active_list))
+ if (!list_empty(&iocg->active_list))
+ goto succeed_unlock;
+ for (i = iocg->level - 1; i > 0; i--)
+ if (!list_empty(&iocg->ancestors[i]->active_list))
goto fail_unlock;
+
if (iocg->child_active_sum)
goto fail_unlock;
@@ -1101,6 +1104,7 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
ioc_start_period(ioc, now);
}
+succeed_unlock:
spin_unlock_irq(&ioc->lock);
return true;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index cc37511de866..6265871a4af2 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -554,12 +554,27 @@ ssize_t __weak cpu_show_mds(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
+static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
+static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -568,6 +583,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_spec_store_bypass.attr,
&dev_attr_l1tf.attr,
&dev_attr_mds.attr,
+ &dev_attr_tsx_async_abort.attr,
+ &dev_attr_itlb_multihit.attr,
NULL
};
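The cpu_show_* defaults above rely on weak linkage: the driver core supplies a fallback that architecture code overrides with a strong symbol of the same name. A minimal sketch of the mechanism using GCC/Clang __attribute__((weak)); the override shown in the trailing comment is hypothetical:

/* default.c: weak fallback, used when no strong override is linked in. */
#include <stdio.h>

__attribute__((weak)) int cpu_show_demo(char *buf)
{
	return sprintf(buf, "Not affected\n");
}

int main(void)
{
	char buf[64];

	cpu_show_demo(buf);
	fputs(buf, stdout);
	return 0;
}

/* arch.c: an optional strong override with the same name would win:
int cpu_show_demo(char *buf)
{
	return sprintf(buf, "Mitigation: example\n");
}
*/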
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 55907c27075b..84c4e1f72cbd 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -872,3 +872,39 @@ int walk_memory_blocks(unsigned long start, unsigned long size,
}
return ret;
}
+
+struct for_each_memory_block_cb_data {
+ walk_memory_blocks_func_t func;
+ void *arg;
+};
+
+static int for_each_memory_block_cb(struct device *dev, void *data)
+{
+ struct memory_block *mem = to_memory_block(dev);
+ struct for_each_memory_block_cb_data *cb_data = data;
+
+ return cb_data->func(mem, cb_data->arg);
+}
+
+/**
+ * for_each_memory_block - walk through all present memory blocks
+ *
+ * @arg: argument passed to func
+ * @func: callback for each memory block walked
+ *
+ * This function walks through all present memory blocks, calling func on
+ * each memory block.
+ *
+ * In case func() returns an error, walking is aborted and the error is
+ * returned.
+ */
+int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
+{
+ struct for_each_memory_block_cb_data cb_data = {
+ .func = func,
+ .arg = arg,
+ };
+
+ return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
+ for_each_memory_block_cb);
+}
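for_each_memory_block() adapts bus_for_each_dev()'s generic (dev, data) callback to the typed walk_memory_blocks_func_t by packing func and arg into a small struct. A standalone sketch of that adapter pattern with simplified types:

/* Adapt a generic (id, data) walker to a typed callback via a struct. */
#include <stdio.h>

typedef int (*walk_func_t)(int block_id, void *arg);

struct cb_data {
	walk_func_t func;
	void *arg;
};

static int generic_walk(int n, int (*cb)(int id, void *data), void *data)
{
	int id, ret;

	for (id = 0; id < n; id++) {
		ret = cb(id, data);
		if (ret)
			return ret;	/* abort the walk on error */
	}
	return 0;
}

static int adapter(int id, void *data)
{
	struct cb_data *cb = data;

	return cb->func(id, cb->arg);
}

static int count_blocks(int block_id, void *arg)
{
	(*(int *)arg)++;
	return 0;
}

int main(void)
{
	int count = 0;
	struct cb_data cb = { .func = count_blocks, .arg = &count };

	generic_walk(4, adapter, &cb);
	printf("%d blocks\n", count);
	return 0;
}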
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index f4161064365c..3056f81efca4 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -233,8 +233,10 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
switch (bus->chipinfo.id) {
case BCMA_CHIP_ID_BCM4313:
- /* enable 12 mA drive strenth for 4313 and set chipControl
- register bit 1 */
+ /*
+ * enable 12 mA drive strength for 4313 and set chipControl
+ * register bit 1
+ */
bcma_chipco_chipctl_maskset(cc, 0,
~BCMA_CCTRL_4313_12MA_LED_DRIVE,
BCMA_CCTRL_4313_12MA_LED_DRIVE);
@@ -246,8 +248,10 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
break;
case BCMA_CHIP_ID_BCM43224:
case BCMA_CHIP_ID_BCM43421:
- /* enable 12 mA drive strenth for 43224 and set chipControl
- register bit 15 */
+ /*
+ * enable 12 mA drive strength for 43224 and set chipControl
+ * register bit 15
+ */
if (bus->chipinfo.rev == 0) {
bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL,
~BCMA_CCTRL_43224_GPIO_TOGGLE,
@@ -500,8 +504,10 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
case BCMA_CHIP_ID_BCM53572:
/* 5357[ab]0, 43236[ab]0, and 6362b0 */
- /* BCM5357 needs to touch PLL1_PLLCTL[02],
- so offset PLL0_PLLCTL[02] by 6 */
+ /*
+ * BCM5357 needs to touch PLL1_PLLCTL[02],
+ * so offset PLL0_PLLCTL[02] by 6
+ */
phypll_offset = (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM4749 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
@@ -619,8 +625,10 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
case BCMA_CHIP_ID_BCM43228:
case BCMA_CHIP_ID_BCM43428:
/* LCNXN */
- /* PLL Settings for spur avoidance on/off mode,
- no on2 support for 43228A0 */
+ /*
+ * PLL Settings for spur avoidance on/off mode,
+ * no on2 support for 43228A0
+ */
if (spuravoid == 1) {
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
0x01100014);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 5b248763a672..a18155cdce41 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -786,7 +786,6 @@ int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cm
if (nc->tentative && connection->agreed_pro_version < 92) {
rcu_read_unlock();
- mutex_unlock(&sock->mutex);
drbd_err(connection, "--dry-run is not supported by peer");
return -EOPNOTSUPP;
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 39136675dae5..13527a0b4e44 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2087,7 +2087,7 @@ static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
struct ceph_osd_data *osd_data;
u64 objno;
- u8 state, new_state, current_state;
+ u8 state, new_state, uninitialized_var(current_state);
bool has_current_state;
void *p;
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 76b73ddf8fd7..10f6368117d8 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -1000,8 +1000,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
cancel_work_sync(&card->event_work);
+ destroy_workqueue(card->event_wq);
rsxx_destroy_dev(card);
rsxx_dma_destroy(card);
+ destroy_workqueue(card->creg_ctrl.creg_wq);
spin_lock_irqsave(&card->irq_lock, flags);
rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 813338288453..519788c442ca 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -57,6 +57,7 @@ static const struct sdio_device_id btmtksdio_table[] = {
.driver_data = (kernel_ulong_t)&mt7668_data },
{ } /* Terminating entry */
};
+MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
#define MTK_REG_CHLPCR 0x4 /* W1S */
#define C_INT_EN_SET BIT(0)
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 8cc21ad7cf29..ec69e5dd7bd3 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -14,19 +14,33 @@
#define VERSION "0.1"
-int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
+int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
+ enum qca_btsoc_type soc_type)
{
struct sk_buff *skb;
struct edl_event_hdr *edl;
- struct rome_version *ver;
+ struct qca_btsoc_version *ver;
char cmd;
int err = 0;
+ u8 event_type = HCI_EV_VENDOR;
+ u8 rlen = sizeof(*edl) + sizeof(*ver);
+ u8 rtype = EDL_APP_VER_RES_EVT;
bt_dev_dbg(hdev, "QCA Version Request");
+ /* Unlike other SoCs, which send the version command response as the
+ * payload of a VSE event, WCN3991 sends the version command response
+ * as the payload of a command complete event.
+ */
+ if (soc_type == QCA_WCN3991) {
+ event_type = 0;
+ rlen += 1;
+ rtype = EDL_PATCH_VER_REQ_CMD;
+ }
+
cmd = EDL_PATCH_VER_REQ_CMD;
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
- &cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ &cmd, event_type, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "Reading QCA version information failed (%d)",
@@ -34,7 +48,7 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
return err;
}
- if (skb->len != sizeof(*edl) + sizeof(*ver)) {
+ if (skb->len != rlen) {
bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len);
err = -EILSEQ;
goto out;
@@ -48,18 +62,21 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
}
if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
- edl->rtype != EDL_APP_VER_RES_EVT) {
+ edl->rtype != rtype) {
bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
edl->rtype);
err = -EIO;
goto out;
}
- ver = (struct rome_version *)(edl->data);
+ if (soc_type == QCA_WCN3991)
+ memmove(&edl->data, &edl->data[1], sizeof(*ver));
+
+ ver = (struct qca_btsoc_version *)(edl->data);
BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
BT_DBG("%s: Patch :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
- BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
+ BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rom_ver));
BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
/* QCA chipset version can be decided by patch and SoC
@@ -67,7 +84,7 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
* and lower 2 bytes from patch will be used.
*/
*soc_version = (le32_to_cpu(ver->soc_id) << 16) |
- (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
+ (le16_to_cpu(ver->rom_ver) & 0x0000ffff);
if (*soc_version == 0)
err = -EILSEQ;
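/*
 * Editor's illustration with hypothetical values: given soc_id 0x00004320
 * and rom_ver 0x0200, the composition above yields
 * (0x00004320 << 16) | (0x0200 & 0x0000ffff) = 0x43200200, i.e. the SoC id
 * supplies the upper 16 bits and the ROM version the lower 16 bits.
 */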
@@ -121,7 +138,7 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
-static void qca_tlv_check_data(struct rome_config *config,
+static void qca_tlv_check_data(struct qca_fw_config *config,
const struct firmware *fw)
{
const u8 *data;
@@ -140,8 +157,8 @@ static void qca_tlv_check_data(struct rome_config *config,
BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
BT_DBG("Length\t\t : %d bytes", length);
- config->dnld_mode = ROME_SKIP_EVT_NONE;
- config->dnld_type = ROME_SKIP_EVT_NONE;
+ config->dnld_mode = QCA_SKIP_EVT_NONE;
+ config->dnld_type = QCA_SKIP_EVT_NONE;
switch (config->type) {
case TLV_TYPE_PATCH:
@@ -223,31 +240,45 @@ static void qca_tlv_check_data(struct rome_config *config,
}
static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
- const u8 *data, enum rome_tlv_dnld_mode mode)
+ const u8 *data, enum qca_tlv_dnld_mode mode,
+ enum qca_btsoc_type soc_type)
{
struct sk_buff *skb;
struct edl_event_hdr *edl;
struct tlv_seg_resp *tlv_resp;
u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2];
int err = 0;
+ u8 event_type = HCI_EV_VENDOR;
+ u8 rlen = (sizeof(*edl) + sizeof(*tlv_resp));
+ u8 rtype = EDL_TVL_DNLD_RES_EVT;
cmd[0] = EDL_PATCH_TLV_REQ_CMD;
cmd[1] = seg_size;
memcpy(cmd + 2, data, seg_size);
- if (mode == ROME_SKIP_EVT_VSE_CC || mode == ROME_SKIP_EVT_VSE)
+ if (mode == QCA_SKIP_EVT_VSE_CC || mode == QCA_SKIP_EVT_VSE)
return __hci_cmd_send(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2,
cmd);
+ /* Unlike other SoCs, which send the command response as the payload
+ * of a VSE event, WCN3991 sends the TLV command response as the
+ * payload of a command complete event.
+ */
+ if (soc_type == QCA_WCN3991) {
+ event_type = 0;
+ rlen = sizeof(*edl);
+ rtype = EDL_PATCH_TLV_REQ_CMD;
+ }
+
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
- HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ event_type, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err);
return err;
}
- if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
+ if (skb->len != rlen) {
bt_dev_err(hdev, "QCA TLV response size mismatch");
err = -EILSEQ;
goto out;
@@ -260,13 +291,19 @@ static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
goto out;
}
- tlv_resp = (struct tlv_seg_resp *)(edl->data);
+ if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != rtype) {
+ bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x",
+ edl->cresp, edl->rtype);
+ err = -EIO;
+ }
- if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
- edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) {
+ if (soc_type == QCA_WCN3991)
+ goto out;
+
+ tlv_resp = (struct tlv_seg_resp *)(edl->data);
+ if (tlv_resp->result) {
bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x (0x%x)",
edl->cresp, edl->rtype, tlv_resp->result);
- err = -EIO;
}
out:
@@ -301,7 +338,8 @@ static int qca_inject_cmd_complete_event(struct hci_dev *hdev)
}
static int qca_download_firmware(struct hci_dev *hdev,
- struct rome_config *config)
+ struct qca_fw_config *config,
+ enum qca_btsoc_type soc_type)
{
const struct firmware *fw;
const u8 *segment;
@@ -328,10 +366,10 @@ static int qca_download_firmware(struct hci_dev *hdev,
remain -= segsize;
/* The last segment is always acked regardless of download mode */
if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT)
- config->dnld_mode = ROME_SKIP_EVT_NONE;
+ config->dnld_mode = QCA_SKIP_EVT_NONE;
ret = qca_tlv_send_segment(hdev, segsize, segment,
- config->dnld_mode);
+ config->dnld_mode, soc_type);
if (ret)
goto out;
@@ -344,8 +382,8 @@ static int qca_download_firmware(struct hci_dev *hdev,
* decrease the BT in initialization time. Here we will inject a command
* complete event to avoid a command timeout error message.
*/
- if (config->dnld_type == ROME_SKIP_EVT_VSE_CC ||
- config->dnld_type == ROME_SKIP_EVT_VSE)
+ if (config->dnld_type == QCA_SKIP_EVT_VSE_CC ||
+ config->dnld_type == QCA_SKIP_EVT_VSE)
ret = qca_inject_cmd_complete_event(hdev);
out:
@@ -382,7 +420,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, u32 soc_ver,
const char *firmware_name)
{
- struct rome_config config;
+ struct qca_fw_config config;
int err;
u8 rom_ver = 0;
@@ -405,7 +443,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/rampatch_%08x.bin", soc_ver);
}
- err = qca_download_firmware(hdev, &config);
+ err = qca_download_firmware(hdev, &config, soc_type);
if (err < 0) {
bt_dev_err(hdev, "QCA Failed to download patch (%d)", err);
return err;
@@ -426,7 +464,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
- err = qca_download_firmware(hdev, &config);
+ err = qca_download_firmware(hdev, &config, soc_type);
if (err < 0) {
bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err);
return err;
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 69c5315a65fd..f5795b1a3779 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -56,24 +56,24 @@ enum qca_baudrate {
QCA_BAUDRATE_RESERVED
};
-enum rome_tlv_dnld_mode {
- ROME_SKIP_EVT_NONE,
- ROME_SKIP_EVT_VSE,
- ROME_SKIP_EVT_CC,
- ROME_SKIP_EVT_VSE_CC
+enum qca_tlv_dnld_mode {
+ QCA_SKIP_EVT_NONE,
+ QCA_SKIP_EVT_VSE,
+ QCA_SKIP_EVT_CC,
+ QCA_SKIP_EVT_VSE_CC
};
-enum rome_tlv_type {
+enum qca_tlv_type {
TLV_TYPE_PATCH = 1,
TLV_TYPE_NVM
};
-struct rome_config {
+struct qca_fw_config {
u8 type;
char fwname[64];
uint8_t user_baud_rate;
- enum rome_tlv_dnld_mode dnld_mode;
- enum rome_tlv_dnld_mode dnld_type;
+ enum qca_tlv_dnld_mode dnld_mode;
+ enum qca_tlv_dnld_mode dnld_type;
};
struct edl_event_hdr {
@@ -82,10 +82,10 @@ struct edl_event_hdr {
__u8 data[0];
} __packed;
-struct rome_version {
+struct qca_btsoc_version {
__le32 product_id;
__le16 patch_ver;
- __le16 rome_ver;
+ __le16 rom_ver;
__le32 soc_id;
} __packed;
@@ -125,6 +125,7 @@ enum qca_btsoc_type {
QCA_AR3002,
QCA_ROME,
QCA_WCN3990,
+ QCA_WCN3991,
QCA_WCN3998,
};
@@ -134,12 +135,14 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, u32 soc_ver,
const char *firmware_name);
-int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
+int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
+ enum qca_btsoc_type);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
{
- return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
+ return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3991 ||
+ soc_type == QCA_WCN3998;
}
#else
@@ -155,7 +158,8 @@ static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return -EOPNOTSUPP;
}
-static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
+static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
+ enum qca_btsoc_type)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index ae9a2047f242..f838537f9f89 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -778,7 +778,7 @@ int btrtl_get_uart_settings(struct hci_dev *hdev,
rtl_dev_dbg(hdev, "skipping config entry 0x%x (len %u)",
le16_to_cpu(entry->offset), entry->len);
break;
- };
+ }
i += sizeof(*entry) + entry->len;
}
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 0f73f6a686cb..0f851c0dde7f 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -1424,6 +1424,7 @@ static const struct of_device_id bcm_bluetooth_of_match[] = {
{ .compatible = "brcm,bcm4345c5" },
{ .compatible = "brcm,bcm4330-bt" },
{ .compatible = "brcm,bcm43438-bt" },
+ { .compatible = "brcm,bcm43540-bt" },
{ },
};
MODULE_DEVICE_TABLE(of, bcm_bluetooth_of_match);
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index fe2e307009f4..cf4a56095817 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -591,6 +591,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
if (*ptr == 0xc0) {
BT_ERR("Short BCSP packet");
kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_START;
bcsp->rx_count = 0;
} else
@@ -606,6 +607,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
BT_ERR("Error in BCSP hdr checksum");
kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
bcsp->rx_count = 0;
continue;
@@ -630,6 +632,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
bscp_get_crc(bcsp));
kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
bcsp->rx_count = 0;
continue;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index c591a8ba9d93..f10bdf8e1fc5 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -43,7 +43,8 @@
#define HCI_MAX_IBS_SIZE 10
#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
-#define IBS_TX_IDLE_TIMEOUT_MS 2000
+#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 40
+#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100
/* susclk rate */
@@ -55,6 +56,7 @@
enum qca_flags {
QCA_IBS_ENABLED,
QCA_DROP_VENDOR_EVENT,
+ QCA_SUSPENDING,
};
/* HCI_IBS transmit side sleep protocol states */
@@ -100,6 +102,7 @@ struct qca_data {
struct work_struct ws_tx_vote_off;
unsigned long flags;
struct completion drop_ev_comp;
+ wait_queue_head_t suspend_wait_q;
/* For debugging purpose */
u64 ibs_sent_wacks;
@@ -437,6 +440,12 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
spin_lock_irqsave_nested(&qca->hci_ibs_lock,
flags, SINGLE_DEPTH_NESTING);
+ /* Don't retransmit the HCI_IBS_WAKE_IND when suspending. */
+ if (test_bit(QCA_SUSPENDING, &qca->flags)) {
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+ return;
+ }
+
switch (qca->tx_ibs_state) {
case HCI_IBS_TX_WAKING:
/* No WAKE_ACK, retransmit WAKE */
@@ -496,6 +505,8 @@ static int qca_open(struct hci_uart *hu)
INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
+ init_waitqueue_head(&qca->suspend_wait_q);
+
qca->hu = hu;
init_completion(&qca->drop_ev_comp);
@@ -532,7 +543,7 @@ static int qca_open(struct hci_uart *hu)
qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
- qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
+ qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
qca->tx_idle_delay, qca->wake_retrans);
@@ -647,6 +658,12 @@ static void device_want_to_wakeup(struct hci_uart *hu)
qca->ibs_recv_wakes++;
+ /* Don't wake the rx up when suspending. */
+ if (test_bit(QCA_SUSPENDING, &qca->flags)) {
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+ return;
+ }
+
switch (qca->rx_ibs_state) {
case HCI_IBS_RX_ASLEEP:
/* Make sure clock is on - we may have turned clock off since
@@ -711,6 +728,8 @@ static void device_want_to_sleep(struct hci_uart *hu)
break;
}
+ wake_up_interruptible(&qca->suspend_wait_q);
+
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}
@@ -728,6 +747,12 @@ static void device_woke_up(struct hci_uart *hu)
qca->ibs_recv_wacks++;
+ /* Don't react to the wake-up-acknowledgment when suspending. */
+ if (test_bit(QCA_SUSPENDING, &qca->flags)) {
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+ return;
+ }
+
switch (qca->tx_ibs_state) {
case HCI_IBS_TX_AWAKE:
/* Expect one if we send 2 WAKEs */
@@ -780,8 +805,10 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
/* Don't go to sleep in middle of patch download or
* Out-Of-Band(GPIOs control) sleep is selected.
+ * Don't wake the device up when suspending.
*/
- if (!test_bit(QCA_IBS_ENABLED, &qca->flags)) {
+ if (!test_bit(QCA_IBS_ENABLED, &qca->flags) ||
+ test_bit(QCA_SUSPENDING, &qca->flags)) {
skb_queue_tail(&qca->txq, skb);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
return 0;
@@ -1261,7 +1288,7 @@ static int qca_setup(struct hci_uart *hu)
if (ret)
return ret;
- ret = qca_read_soc_version(hdev, &soc_ver);
+ ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
if (ret)
return ret;
} else {
@@ -1281,7 +1308,7 @@ static int qca_setup(struct hci_uart *hu)
if (!qca_is_wcn399x(soc_type)) {
/* Get QCA version information */
- ret = qca_read_soc_version(hdev, &soc_ver);
+ ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
if (ret)
return ret;
}
@@ -1339,6 +1366,17 @@ static const struct qca_vreg_data qca_soc_data_wcn3990 = {
.num_vregs = 4,
};
+static const struct qca_vreg_data qca_soc_data_wcn3991 = {
+ .soc_type = QCA_WCN3991,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 15000 },
+ { "vddxo", 80000 },
+ { "vddrf", 300000 },
+ { "vddch0", 450000 },
+ },
+ .num_vregs = 4,
+};
+
static const struct qca_vreg_data qca_soc_data_wcn3998 = {
.soc_type = QCA_WCN3998,
.vregs = (struct qca_vreg []) {
@@ -1539,9 +1577,103 @@ static void qca_serdev_remove(struct serdev_device *serdev)
hci_uart_unregister_device(&qcadev->serdev_hu);
}
+static int __maybe_unused qca_suspend(struct device *dev)
+{
+ struct hci_dev *hdev = container_of(dev, struct hci_dev, dev);
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+ unsigned long flags;
+ int ret = 0;
+ u8 cmd;
+
+ set_bit(QCA_SUSPENDING, &qca->flags);
+
+ /* Device is downloading patch or doesn't support in-band sleep. */
+ if (!test_bit(QCA_IBS_ENABLED, &qca->flags))
+ return 0;
+
+ cancel_work_sync(&qca->ws_awake_device);
+ cancel_work_sync(&qca->ws_awake_rx);
+
+ spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+ flags, SINGLE_DEPTH_NESTING);
+
+ switch (qca->tx_ibs_state) {
+ case HCI_IBS_TX_WAKING:
+ del_timer(&qca->wake_retrans_timer);
+ /* Fall through */
+ case HCI_IBS_TX_AWAKE:
+ del_timer(&qca->tx_idle_timer);
+
+ serdev_device_write_flush(hu->serdev);
+ cmd = HCI_IBS_SLEEP_IND;
+ ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
+
+ if (ret < 0) {
+ BT_ERR("Failed to send SLEEP to device");
+ break;
+ }
+
+ qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+ qca->ibs_sent_slps++;
+
+ qca_wq_serial_tx_clock_vote_off(&qca->ws_tx_vote_off);
+ break;
+
+ case HCI_IBS_TX_ASLEEP:
+ break;
+
+ default:
+ BT_ERR("Spurious tx state %d", qca->tx_ibs_state);
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+ if (ret < 0)
+ goto error;
+
+ serdev_device_wait_until_sent(hu->serdev,
+ msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
+
+ /* Wait for the HCI_IBS_SLEEP_IND sent by the device, indicating that
+ * its Tx side is going to sleep, so that a stray packet does not wake
+ * the system later.
+ */
+
+ ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
+ qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
+ msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
+
+ if (ret > 0)
+ return 0;
+
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+
+error:
+ clear_bit(QCA_SUSPENDING, &qca->flags);
+
+ return ret;
+}
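/*
 * Editor's note: read as a sketch, the suspend path above (1) raises
 * QCA_SUSPENDING so the IBS state machine stops waking the device,
 * (2) pushes an HCI_IBS_SLEEP_IND to the controller if its Tx side is
 * still awake, and (3) waits, bounded by IBS_BTSOC_TX_IDLE_TIMEOUT_MS,
 * for the Rx side to reach HCI_IBS_RX_ASLEEP before reporting success.
 */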
+
+static int __maybe_unused qca_resume(struct device *dev)
+{
+ struct hci_dev *hdev = container_of(dev, struct hci_dev, dev);
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+
+ clear_bit(QCA_SUSPENDING, &qca->flags);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);
+
static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
+ { .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
{ /* sentinel */ }
};
@@ -1553,6 +1685,7 @@ static struct serdev_device_driver qca_serdev_driver = {
.driver = {
.name = "hci_uart_qca",
.of_match_table = qca_bluetooth_of_match,
+ .pm = &qca_pm_ops,
},
};
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 80b850ef1bf6..8d53b8ef545c 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -13,7 +13,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
@@ -422,9 +421,7 @@ static int hwrng_fillfn(void *unused)
{
long rc;
- set_freezable();
-
- while (!kthread_freezable_should_stop(NULL)) {
+ while (!kthread_should_stop()) {
struct hwrng *rng;
rng = get_current_rng();
diff --git a/drivers/char/random.c b/drivers/char/random.c
index de434feb873a..01b8868b9bed 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -327,7 +327,6 @@
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
-#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
@@ -2500,8 +2499,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
* We'll be woken up again once below random_write_wakeup_thresh,
* or when the calling thread is about to terminate.
*/
- wait_event_freezable(random_write_wait,
- kthread_should_stop() ||
+ wait_event_interruptible(random_write_wait, kthread_should_stop() ||
ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
mix_pool_bytes(poolp, buffer, count);
credit_entropy_bits(poolp, entropy);
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index 87083b3a2769..37c22667e831 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -297,7 +297,10 @@ static int clk_main_probe_frequency(struct regmap *regmap)
regmap_read(regmap, AT91_CKGR_MCFR, &mcfr);
if (mcfr & AT91_PMC_MAINRDY)
return 0;
- usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
+ if (system_state < SYSTEM_RUNNING)
+ udelay(MAINF_LOOP_MIN_WAIT);
+ else
+ usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
} while (time_before(prep_time, timeout));
return -ETIMEDOUT;
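/*
 * Editor's note: the system_state < SYSTEM_RUNNING test here (and in the
 * sckc.c hunks below) presumably exists because usleep_range() sleeps and
 * depends on a running scheduler/hrtimers, which are unavailable during
 * early boot; udelay() busy-waits and is safe in that window.
 */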
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index 9790ddfa5b3c..86238d5ecb4d 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -43,6 +43,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
};
static const struct clk_programmable_layout sam9x60_programmable_layout = {
+ .pres_mask = 0xff,
.pres_shift = 8,
.css_mask = 0x1f,
.have_slck_mck = 0,
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index 9bfe9a28294a..fac0ca56d42d 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -76,7 +76,10 @@ static int clk_slow_osc_prepare(struct clk_hw *hw)
writel(tmp | osc->bits->cr_osc32en, sckcr);
- usleep_range(osc->startup_usec, osc->startup_usec + 1);
+ if (system_state < SYSTEM_RUNNING)
+ udelay(osc->startup_usec);
+ else
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
return 0;
}
@@ -187,7 +190,10 @@ static int clk_slow_rc_osc_prepare(struct clk_hw *hw)
writel(readl(sckcr) | osc->bits->cr_rcen, sckcr);
- usleep_range(osc->startup_usec, osc->startup_usec + 1);
+ if (system_state < SYSTEM_RUNNING)
+ udelay(osc->startup_usec);
+ else
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
return 0;
}
@@ -288,7 +294,10 @@ static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index)
writel(tmp, sckcr);
- usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
+ if (system_state < SYSTEM_RUNNING)
+ udelay(SLOWCK_SW_TIME_USEC);
+ else
+ usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
return 0;
}
@@ -533,7 +542,10 @@ static int clk_sama5d4_slow_osc_prepare(struct clk_hw *hw)
return 0;
}
- usleep_range(osc->startup_usec, osc->startup_usec + 1);
+ if (system_state < SYSTEM_RUNNING)
+ udelay(osc->startup_usec);
+ else
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
osc->prepared = true;
return 0;
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 1c1bb39bb04e..b1318e6b655b 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -266,10 +266,11 @@ static int aspeed_g6_clk_enable(struct clk_hw *hw)
/* Enable clock */
if (gate->flags & CLK_GATE_SET_TO_DISABLE) {
- regmap_write(gate->map, get_clock_reg(gate), clk);
- } else {
- /* Use set to clear register */
+ /* Clock is clear to enable, so use set to clear register */
regmap_write(gate->map, get_clock_reg(gate) + 0x04, clk);
+ } else {
+ /* Clock is set to enable, so use write to set register */
+ regmap_write(gate->map, get_clock_reg(gate), clk);
}
if (gate->reset_idx >= 0) {
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 067ab876911d..172589e94f60 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -638,7 +638,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
clks[IMX8MM_CLK_A53_DIV],
clks[IMX8MM_CLK_A53_SRC],
clks[IMX8MM_ARM_PLL_OUT],
- clks[IMX8MM_CLK_24M]);
+ clks[IMX8MM_SYS_PLL1_800M]);
imx_check_clocks(clks, ARRAY_SIZE(clks));
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 47a4b44ba3cb..58b5acee3830 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -610,7 +610,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
clks[IMX8MN_CLK_A53_DIV],
clks[IMX8MN_CLK_A53_SRC],
clks[IMX8MN_ARM_PLL_OUT],
- clks[IMX8MN_CLK_24M]);
+ clks[IMX8MN_SYS_PLL1_800M]);
imx_check_clocks(clks, ARRAY_SIZE(clks));
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index ea4c791f106d..b3af61cc6fb9 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -343,6 +343,7 @@ static struct clk_regmap g12a_cpu_clk_premux0 = {
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x3,
.shift = 0,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_dyn0_sel",
@@ -353,8 +354,7 @@ static struct clk_regmap g12a_cpu_clk_premux0 = {
{ .hw = &g12a_fclk_div3.hw },
},
.num_parents = 3,
- /* This sub-tree is used a parking clock */
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -410,6 +410,7 @@ static struct clk_regmap g12a_cpu_clk_postmux0 = {
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x1,
.shift = 2,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_dyn0",
@@ -466,6 +467,7 @@ static struct clk_regmap g12a_cpu_clk_dyn = {
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x1,
.shift = 10,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_dyn",
@@ -485,6 +487,7 @@ static struct clk_regmap g12a_cpu_clk = {
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x1,
.shift = 11,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cpu_clk",
@@ -504,6 +507,7 @@ static struct clk_regmap g12b_cpu_clk = {
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x1,
.shift = 11,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cpu_clk",
@@ -523,6 +527,7 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x3,
.shift = 0,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_dyn0_sel",
@@ -533,6 +538,7 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
{ .hw = &g12a_fclk_div3.hw },
},
.num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -567,6 +573,7 @@ static struct clk_regmap g12b_cpub_clk_postmux0 = {
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x1,
.shift = 2,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_dyn0",
@@ -644,6 +651,7 @@ static struct clk_regmap g12b_cpub_clk_dyn = {
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x1,
.shift = 10,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_dyn",
@@ -663,6 +671,7 @@ static struct clk_regmap g12b_cpub_clk = {
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x1,
.shift = 11,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk",
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 7cfb998eeb3e..1f9c056e684c 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -935,6 +935,7 @@ static struct clk_regmap gxbb_sar_adc_clk_div = {
&gxbb_sar_adc_clk_sel.hw
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 7670cc596c74..31466cd1842f 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -165,12 +165,18 @@ static const unsigned long exynos5x_clk_regs[] __initconst = {
GATE_BUS_CPU,
GATE_SCLK_CPU,
CLKOUT_CMU_CPU,
+ CPLL_CON0,
+ DPLL_CON0,
EPLL_CON0,
EPLL_CON1,
EPLL_CON2,
RPLL_CON0,
RPLL_CON1,
RPLL_CON2,
+ IPLL_CON0,
+ SPLL_CON0,
+ VPLL_CON0,
+ MPLL_CON0,
SRC_TOP0,
SRC_TOP1,
SRC_TOP2,
@@ -1172,8 +1178,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(CLK_SCLK_ISP_SENSOR2, "sclk_isp_sensor2", "dout_isp_sensor2",
GATE_TOP_SCLK_ISP, 12, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
-
/* CDREX */
GATE(CLK_CLKM_PHY0, "clkm_phy0", "dout_sclk_cdrex",
GATE_BUS_CDREX0, 0, 0, 0),
@@ -1248,6 +1252,15 @@ static struct exynos5_subcmu_reg_dump exynos5x_gsc_suspend_regs[] = {
{ DIV2_RATIO0, 0, 0x30 }, /* DIV dout_gscl_blk_300 */
};
+static const struct samsung_gate_clock exynos5x_g3d_gate_clks[] __initconst = {
+ GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5x_g3d_suspend_regs[] = {
+ { GATE_IP_G3D, 0x3ff, 0x3ff }, /* G3D gates */
+ { SRC_TOP5, 0, BIT(16) }, /* MUX mout_user_aclk_g3d */
+};
+
static const struct samsung_div_clock exynos5x_mfc_div_clks[] __initconst = {
DIV(0, "dout_mfc_blk", "mout_user_aclk333", DIV4_RATIO, 0, 2),
};
@@ -1320,6 +1333,14 @@ static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
.pd_name = "GSC",
};
+static const struct exynos5_subcmu_info exynos5x_g3d_subcmu = {
+ .gate_clks = exynos5x_g3d_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_g3d_gate_clks),
+ .suspend_regs = exynos5x_g3d_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_g3d_suspend_regs),
+ .pd_name = "G3D",
+};
+
static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
.div_clks = exynos5x_mfc_div_clks,
.nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
@@ -1351,6 +1372,7 @@ static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
&exynos5x_disp_subcmu,
&exynos5x_gsc_subcmu,
+ &exynos5x_g3d_subcmu,
&exynos5x_mfc_subcmu,
&exynos5x_mscl_subcmu,
};
@@ -1358,6 +1380,7 @@ static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
&exynos5x_disp_subcmu,
&exynos5x_gsc_subcmu,
+ &exynos5x_g3d_subcmu,
&exynos5x_mfc_subcmu,
&exynos5x_mscl_subcmu,
&exynos5800_mau_subcmu,
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 7824c2ba3d8e..4b1aa9382ad2 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -13,6 +13,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/exynos5433.h>
@@ -5584,6 +5585,8 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
data->clk_save = samsung_clk_alloc_reg_dump(info->clk_regs,
info->nr_clk_regs);
+ if (!data->clk_save)
+ return -ENOMEM;
data->nr_clk_save = info->nr_clk_regs;
data->clk_suspend = info->suspend_regs;
data->nr_clk_suspend = info->nr_suspend_regs;
@@ -5592,12 +5595,19 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
if (data->nr_pclks > 0) {
data->pclks = devm_kcalloc(dev, sizeof(struct clk *),
data->nr_pclks, GFP_KERNEL);
-
+ if (!data->pclks) {
+ kfree(data->clk_save);
+ return -ENOMEM;
+ }
for (i = 0; i < data->nr_pclks; i++) {
struct clk *clk = of_clk_get(dev->of_node, i);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ kfree(data->clk_save);
+ while (--i >= 0)
+ clk_put(data->pclks[i]);
return PTR_ERR(clk);
+ }
data->pclks[i] = clk;
}
}
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
index dcac1391767f..ef29582676f6 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
@@ -1224,7 +1224,7 @@ static int sun9i_a80_ccu_probe(struct platform_device *pdev)
/* Enforce d1 = 0, d2 = 0 for Audio PLL */
val = readl(reg + SUN9I_A80_PLL_AUDIO_REG);
- val &= (BIT(16) & BIT(18));
+ val &= ~(BIT(16) | BIT(18));
writel(val, reg + SUN9I_A80_PLL_AUDIO_REG);
/* Enforce P = 1 for both CPU cluster PLLs */
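/*
 * Editor's note: BIT(16) & BIT(18) is 0, so the original statement cleared
 * every bit of val rather than just the two divider bits. Masking with
 * ~(BIT(16) | BIT(18)) clears only bits 16 and 18 (the d1 and d2 fields)
 * and preserves the rest of the register.
 */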
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index d3a43381a792..27201fd26e44 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -1080,8 +1080,8 @@ static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node,
rate_hw, rate_ops,
gate_hw, &clk_gate_ops,
clkflags |
- data->div[i].critical ?
- CLK_IS_CRITICAL : 0);
+ (data->div[i].critical ?
+ CLK_IS_CRITICAL : 0));
WARN_ON(IS_ERR(clk_data->clks[i]));
}
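/*
 * Editor's note: ?: binds more loosely than |, so the original expression
 * parsed as (clkflags | data->div[i].critical) ? CLK_IS_CRITICAL : 0,
 * discarding clkflags whenever either operand was non-zero. The added
 * parentheses restore the intended "clkflags, plus CLK_IS_CRITICAL when
 * the divider is critical" combination.
 */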
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index a01ca9395179..f65e16c4f3c4 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -174,7 +174,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
struct clk_init_data init = { NULL };
const char **parent_names = NULL;
struct clk *clk;
- int ret;
clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
if (!clk_hw) {
@@ -207,11 +206,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
clk = ti_clk_register(NULL, &clk_hw->hw, node->name);
if (!IS_ERR(clk)) {
- ret = ti_clk_add_alias(NULL, clk, node->name);
- if (ret) {
- clk_unregister(clk);
- goto cleanup;
- }
of_clk_add_provider(node, of_clk_src_simple_get, clk);
kfree(parent_names);
return;
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 975995eea15c..b0c0690a5a12 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -100,11 +100,12 @@ static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
* can be from a timer that requires pm_runtime access, which
* will eventually bring us here with timekeeping_suspended,
* during both suspend entry and resume paths. This happens
- * at least on am43xx platform.
+ * at least on am43xx platform. Account for flakiness
+ * with udelay() by multiplying the timeout value by 2.
*/
if (unlikely(_early_timeout || timekeeping_suspended)) {
if (time->cycles++ < timeout) {
- udelay(1);
+ udelay(1 * 2);
return false;
}
} else {
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 354b27d14a19..62812f80b5cc 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -328,12 +328,13 @@ static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
return 0;
}
+static const unsigned int sh_mtu2_channel_offsets[] = {
+ 0x300, 0x380, 0x000,
+};
+
static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
struct sh_mtu2_device *mtu)
{
- static const unsigned int channel_offsets[] = {
- 0x300, 0x380, 0x000,
- };
char name[6];
int irq;
int ret;
@@ -356,7 +357,7 @@ static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
return ret;
}
- ch->base = mtu->mapbase + channel_offsets[index];
+ ch->base = mtu->mapbase + sh_mtu2_channel_offsets[index];
ch->index = index;
return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));
@@ -408,7 +409,12 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
}
/* Allocate and setup the channels. */
- mtu->num_channels = 3;
+ ret = platform_irq_count(pdev);
+ if (ret < 0)
+ goto err_unmap;
+
+ mtu->num_channels = min_t(unsigned int, ret,
+ ARRAY_SIZE(sh_mtu2_channel_offsets));
mtu->channels = kcalloc(mtu->num_channels, sizeof(*mtu->channels),
GFP_KERNEL);
diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c
index a562f491b0f8..9318edcd8963 100644
--- a/drivers/clocksource/timer-mediatek.c
+++ b/drivers/clocksource/timer-mediatek.c
@@ -268,15 +268,12 @@ static int __init mtk_syst_init(struct device_node *node)
ret = timer_of_init(node, &to);
if (ret)
- goto err;
+ return ret;
clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
TIMER_SYNC_TICKS, 0xffffffff);
return 0;
-err:
- timer_of_cleanup(&to);
- return ret;
}
static int __init mtk_gpt_init(struct device_node *node)
@@ -293,7 +290,7 @@ static int __init mtk_gpt_init(struct device_node *node)
ret = timer_of_init(node, &to);
if (ret)
- goto err;
+ return ret;
/* Configure clock source */
mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN);
@@ -311,9 +308,6 @@ static int __init mtk_gpt_init(struct device_node *node)
mtk_gpt_enable_irq(&to, TIMER_CLK_EVT);
return 0;
-err:
- timer_of_cleanup(&to);
- return ret;
}
TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 53a51c169451..8ab31702cf6a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -847,11 +847,9 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
value |= HWP_MAX_PERF(min_perf);
value |= HWP_MIN_PERF(min_perf);
- /* Set EPP/EPB to min */
+ /* Set EPP to min */
if (boot_cpu_has(X86_FEATURE_HWP_EPP))
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
- else
- intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 98bc5a4cd5e7..599dec59c6cc 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1437,7 +1437,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
csk->wr_max_credits))
sk->sk_write_space(sk);
- if (copied >= target && !sk->sk_backlog.tail)
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break;
if (copied) {
@@ -1470,7 +1470,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
break;
}
}
- if (sk->sk_backlog.tail) {
+ if (READ_ONCE(sk->sk_backlog.tail)) {
release_sock(sk);
lock_sock(sk);
chtls_cleanup_rbuf(sk, copied);
@@ -1615,7 +1615,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
break;
}
- if (sk->sk_backlog.tail) {
+ if (READ_ONCE(sk->sk_backlog.tail)) {
/* Do not sleep, just process backlog. */
release_sock(sk);
lock_sock(sk);
@@ -1743,7 +1743,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
csk->wr_max_credits))
sk->sk_write_space(sk);
- if (copied >= target && !sk->sk_backlog.tail)
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break;
if (copied) {
@@ -1774,7 +1774,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
}
- if (sk->sk_backlog.tail) {
+ if (READ_ONCE(sk->sk_backlog.tail)) {
release_sock(sk);
lock_sock(sk);
chtls_cleanup_rbuf(sk, copied);
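/*
 * Editor's note: sk_backlog.tail can be updated by another CPU while these
 * checks run without the socket lock protecting the read; READ_ONCE()
 * forces one untorn load per test instead of letting the compiler cache or
 * refetch the value, which is presumably the motivation for annotating all
 * four sites.
 */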
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index b132ab9ad607..715e491dfbc3 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -250,7 +250,11 @@ static int fwnet_header_cache(const struct neighbour *neigh,
h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
h->h_proto = type;
memcpy(h->h_dest, neigh->ha, net->addr_len);
- hh->hh_len = FWNET_HLEN;
+
+ /* Pairs with the READ_ONCE() in neigh_resolve_output(),
+ * neigh_hh_output() and neigh_update_hhs().
+ */
+ smp_store_release(&hh->hh_len, FWNET_HLEN);
return 0;
}
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 2f1e9da81c1e..3302125e5265 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -362,9 +362,8 @@ static void mrfld_irq_handler(struct irq_desc *desc)
chained_irq_exit(irqchip, desc);
}
-static int mrfld_irq_init_hw(struct gpio_chip *chip)
+static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
{
- struct mrfld_gpio *priv = gpiochip_get_data(chip);
void __iomem *reg;
unsigned int base;
@@ -376,8 +375,6 @@ static int mrfld_irq_init_hw(struct gpio_chip *chip)
reg = gpio_reg(&priv->chip, base, GFER);
writel(0, reg);
}
-
- return 0;
}
static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
@@ -400,7 +397,6 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
{
const struct mrfld_gpio_pinrange *range;
const char *pinctrl_dev_name;
- struct gpio_irq_chip *girq;
struct mrfld_gpio *priv;
u32 gpio_base, irq_base;
void __iomem *base;
@@ -448,21 +444,6 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
raw_spin_lock_init(&priv->lock);
- girq = &priv->chip.irq;
- girq->chip = &mrfld_irqchip;
- girq->init_hw = mrfld_irq_init_hw;
- girq->parent_handler = mrfld_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->parents[0] = pdev->irq;
- girq->first = irq_base;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_bad_irq;
-
pci_set_drvdata(pdev, priv);
retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
if (retval) {
@@ -484,6 +465,18 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
}
}
+ retval = gpiochip_irqchip_add(&priv->chip, &mrfld_irqchip, irq_base,
+ handle_bad_irq, IRQ_TYPE_NONE);
+ if (retval) {
+ dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n");
+ return retval;
+ }
+
+ mrfld_irq_init_hw(priv);
+
+ gpiochip_set_chained_irqchip(&priv->chip, &mrfld_irqchip, pdev->irq,
+ mrfld_irq_handler);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6614d8a6f4c8..2cdaf3b2a721 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -604,8 +604,11 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
continue;
}
- for (i = 0; i < num_entities; i++)
+ for (i = 0; i < num_entities; i++) {
+ mutex_lock(&ctx->adev->lock_reset);
drm_sched_entity_fini(&ctx->entities[0][i].entity);
+ mutex_unlock(&ctx->adev->lock_reset);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5a1939dbd4e3..7a6c837c0a85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2885,6 +2885,13 @@ fence_driver_init:
DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
}
+ /*
+ * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
+ * Otherwise the mgpu fan boost feature will be skipped because the
+ * gpu instance count would be too low.
+ */
+ amdgpu_register_gpu_instance(adev);
+
/* enable clockgating, etc. after ib tests, etc. since some blocks require
* explicit gating rather than handling it automatically.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 2a00a36106b2..e1c15721611a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1016,6 +1016,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
/* Renoir */
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 6ee4021910e2..6d19183b478b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -289,6 +289,7 @@ struct amdgpu_gfx {
uint32_t mec2_feature_version;
bool mec_fw_write_wait;
bool me_fw_write_wait;
+ bool cp_fw_write_wait;
struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d55f5baa83d3..a042ef471fbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -190,7 +190,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
pm_runtime_put_autosuspend(dev->dev);
}
- amdgpu_register_gpu_instance(adev);
out:
if (r) {
/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4d71537a960d..a46090071034 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -950,21 +950,7 @@ static void psp_print_fw_hdr(struct psp_context *psp,
struct amdgpu_firmware_info *ucode)
{
struct amdgpu_device *adev = psp->adev;
- const struct sdma_firmware_header_v1_0 *sdma_hdr =
- (const struct sdma_firmware_header_v1_0 *)
- adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
- const struct gfx_firmware_header_v1_0 *ce_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- const struct gfx_firmware_header_v1_0 *pfp_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- const struct gfx_firmware_header_v1_0 *me_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- const struct gfx_firmware_header_v1_0 *mec_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- const struct rlc_firmware_header_v2_0 *rlc_hdr =
- (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
- const struct smc_firmware_header_v1_0 *smc_hdr =
- (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+ struct common_firmware_header *hdr;
switch (ucode->ucode_id) {
case AMDGPU_UCODE_ID_SDMA0:
@@ -975,25 +961,33 @@ static void psp_print_fw_hdr(struct psp_context *psp,
case AMDGPU_UCODE_ID_SDMA5:
case AMDGPU_UCODE_ID_SDMA6:
case AMDGPU_UCODE_ID_SDMA7:
- amdgpu_ucode_print_sdma_hdr(&sdma_hdr->header);
+ hdr = (struct common_firmware_header *)
+ adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
+ amdgpu_ucode_print_sdma_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_CE:
- amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_PFP:
- amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_ME:
- amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_MEC1:
- amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_RLC_G:
- amdgpu_ucode_print_rlc_hdr(&rlc_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
+ amdgpu_ucode_print_rlc_hdr(hdr);
break;
case AMDGPU_UCODE_ID_SMC:
- amdgpu_ucode_print_smc_hdr(&smc_hdr->header);
+ hdr = (struct common_firmware_header *)adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(hdr);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 8dfc775626a7..53090eae0082 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -564,6 +564,32 @@ static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
kfree(adev->gfx.rlc.register_list_format);
}
+static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
+{
+ adev->gfx.cp_fw_write_wait = false;
+
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
+ if ((adev->gfx.me_fw_version >= 0x00000046) &&
+ (adev->gfx.me_feature_version >= 27) &&
+ (adev->gfx.pfp_fw_version >= 0x00000068) &&
+ (adev->gfx.pfp_feature_version >= 27) &&
+ (adev->gfx.mec_fw_version >= 0x0000005b) &&
+ (adev->gfx.mec_feature_version >= 27))
+ adev->gfx.cp_fw_write_wait = true;
+ break;
+ default:
+ break;
+ }
+
+ if (!adev->gfx.cp_fw_write_wait)
+ DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize "
+ "GRBM requires 1-cycle delay in cp firmware\n");
+}
+
+
static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
const struct rlc_firmware_header_v2_1 *rlc_hdr;
@@ -832,6 +858,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
}
}
+ gfx_v10_0_check_fw_write_wait(adev);
out:
if (err) {
dev_err(adev->dev,
@@ -4765,6 +4792,24 @@ static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}
+static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask)
+{
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+ struct amdgpu_device *adev = ring->adev;
+ bool fw_version_ok = adev->gfx.cp_fw_write_wait;
+
+ if (fw_version_ok)
+ gfx_v10_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
+ ref, mask, 0x20);
+ else
+ amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
+ ref, mask);
+}
+
static void
gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -5155,6 +5200,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_tmz = gfx_v10_0_ring_emit_tmz,
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -5188,6 +5234,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -5218,6 +5265,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
.emit_rreg = gfx_v10_0_ring_emit_rreg,
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
};
static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index dcadc73bffd2..dfca83a2de47 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -973,6 +973,13 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.me_fw_write_wait = false;
adev->gfx.mec_fw_write_wait = false;
+ if ((adev->gfx.mec_fw_version < 0x000001a5) ||
+ (adev->gfx.mec_feature_version < 46) ||
+ (adev->gfx.pfp_fw_version < 0x000000b7) ||
+ (adev->gfx.pfp_feature_version < 46))
+ DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
+ GRBM requires 1-cycle delay in cp firmware\n");
+
switch (adev->asic_type) {
case CHIP_VEGA10:
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
@@ -1044,6 +1051,12 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_RLC_SMU_HS;
break;
+ case CHIP_RENOIR:
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_RLC_SMU_HS;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 354e6200ca9a..5c7d5f73f54f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -344,11 +344,9 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
upper_32_bits(pd_addr));
- amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
-
- /* wait for the invalidate to complete */
- amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
- 1 << vmid, 1 << vmid);
+ amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
+ hub->vm_inv_eng0_ack + eng,
+ req, 1 << vmid);
return pd_addr;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 0cf7ef44b4b5..9ed178fa241c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -219,6 +219,15 @@ static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index f6e81680dd7e..8493bfbbc148 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1173,6 +1173,16 @@ static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}
+static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask)
+{
+ amdgpu_ring_emit_wreg(ring, reg0, ref);
+ /* wait for a cycle to reset vm_inv_eng*_ack */
+ amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
+ amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
+}
+
static int sdma_v5_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1588,7 +1598,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
6 + /* sdma_v5_0_ring_emit_pipeline_sync */
/* sdma_v5_0_ring_emit_vm_flush */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
.emit_ib = sdma_v5_0_ring_emit_ib,
@@ -1602,6 +1612,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
.pad_ib = sdma_v5_0_ring_pad_ib,
.emit_wreg = sdma_v5_0_ring_emit_wreg,
.emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v5_0_ring_init_cond_exec,
.patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
.preempt_ib = sdma_v5_0_ring_preempt_ib,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f8ab80c8801b..4ccfcdf8f16a 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1186,11 +1186,6 @@ static int soc15_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 0x91;
-
- if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_CP |
- AMD_PG_SUPPORT_RLC_SMU_HS;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 9c58670d5414..ca20b150afcc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2767,15 +2767,6 @@ void core_link_enable_stream(
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
COLOR_DEPTH_UNDEFINED);
- /* This second call is needed to reconfigure the DIG
- * as a workaround for the incorrect value being applied
- * from transmitter control.
- */
- if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
- stream->link->link_enc->funcs->setup(
- stream->link->link_enc,
- pipe_ctx->stream->signal);
-
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index dfb208285a9c..6b2f2f1a1c9c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1107,6 +1107,11 @@ struct stream_encoder *dcn20_stream_encoder_create(
if (!enc1)
return NULL;
+ if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
+ if (eng_id >= ENGINE_ID_DIGD)
+ eng_id++;
+ }
+
dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 0b461404af6b..3ec5a10a7c4d 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -205,7 +205,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
- WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index bbd8ebd58434..92c393f613d3 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -219,7 +219,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_workload_map[PP_SMC_POWER_PROFILE
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
- WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 3ef2ac52ce94..2dd2cd87cdbb 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1581,8 +1581,11 @@ static void commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
const struct drm_mode_config_helper_funcs *funcs;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
ktime_t start;
s64 commit_time_ms;
+ unsigned int i, new_self_refresh_mask = 0;
funcs = dev->mode_config.helper_private;
@@ -1602,6 +1605,15 @@ static void commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_wait_for_dependencies(old_state);
+ /*
+ * We cannot safely access new_crtc_state after
+ * drm_atomic_helper_commit_hw_done() so figure out which crtcs have
+ * self-refresh active beforehand:
+ */
+ for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
+ if (new_crtc_state->self_refresh_active)
+ new_self_refresh_mask |= BIT(i);
+
if (funcs && funcs->atomic_commit_tail)
funcs->atomic_commit_tail(old_state);
else
@@ -1610,7 +1622,8 @@ static void commit_tail(struct drm_atomic_state *old_state)
commit_time_ms = ktime_ms_delta(ktime_get(), start);
if (commit_time_ms > 0)
drm_self_refresh_helper_update_avg_times(old_state,
- (unsigned long)commit_time_ms);
+ (unsigned long)commit_time_ms,
+ new_self_refresh_mask);
drm_atomic_helper_commit_cleanup_done(old_state);
diff --git a/drivers/gpu/drm/drm_self_refresh_helper.c b/drivers/gpu/drm/drm_self_refresh_helper.c
index 68f4765a5896..dd33fec5aabd 100644
--- a/drivers/gpu/drm/drm_self_refresh_helper.c
+++ b/drivers/gpu/drm/drm_self_refresh_helper.c
@@ -133,29 +133,33 @@ out_drop_locks:
* drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages
* @state: the state which has just been applied to hardware
* @commit_time_ms: the amount of time in ms that this commit took to complete
+ * @new_self_refresh_mask: bitmask of crtcs that have self_refresh_active in
+ * the new state
*
* Called after &drm_mode_config_funcs.atomic_commit_tail, this function will
* update the average entry/exit self refresh times on self refresh transitions.
* These averages will be used when calculating how long to delay before
* entering self refresh mode after activity.
*/
-void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
- unsigned int commit_time_ms)
+void
+drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
+ unsigned int commit_time_ms,
+ unsigned int new_self_refresh_mask)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_crtc_state *old_crtc_state;
int i;
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+ bool new_self_refresh_active = new_self_refresh_mask & BIT(i);
struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
struct ewma_psr_time *time;
if (old_crtc_state->self_refresh_active ==
- new_crtc_state->self_refresh_active)
+ new_self_refresh_active)
continue;
- if (new_crtc_state->self_refresh_active)
+ if (new_self_refresh_active)
time = &sr_data->entry_avg_ms;
else
time = &sr_data->exit_avg_ms;
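
Both self-refresh hunks address one lifetime problem: once
drm_atomic_helper_commit_hw_done() runs, a following commit may swap and free
the new CRTC states, so anything needed afterwards has to be snapshotted
first; the bitmask built in commit_tail() is that snapshot. The pattern in
standalone form (plain C with a hypothetical struct, not the DRM helpers):

    #include <assert.h>
    #include <stdlib.h>

    struct crtc_state { int self_refresh_active; };

    int main(void)
    {
        struct crtc_state *state[2];
        unsigned int mask = 0, i;

        for (i = 0; i < 2; i++) {
            state[i] = calloc(1, sizeof(*state[i]));
            state[i]->self_refresh_active = (i == 1);
        }

        /* Snapshot the bit we need while the states are still valid... */
        for (i = 0; i < 2; i++)
            if (state[i]->self_refresh_active)
                mask |= 1u << i;

        /* ...because past this point another owner may free them. */
        for (i = 0; i < 2; i++)
            free(state[i]);

        assert(mask == 0x2); /* only crtc 1 had self refresh active */
        return 0;
    }
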
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index e6e8d4a82044..0a08354a6183 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -864,6 +864,13 @@ load_detect:
out:
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+
+ /*
+ * Make sure the refs for power wells enabled during detect are
+ * dropped to avoid a new detect cycle triggered by HPD polling.
+ */
+ intel_display_power_flush_work(dev_priv);
+
return status;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 12099760d99e..c002f234ff31 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -4896,6 +4896,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
power_domains->initializing = true;
+ /* Must happen before power domain init on VLV/CHV */
+ intel_update_rawclk(i915);
+
if (INTEL_GEN(i915) >= 11) {
icl_display_core_init(i915, resume);
} else if (IS_CANNONLAKE(i915)) {
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 57e9f0ba331b..9b15ac4f2fb6 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1256,6 +1256,9 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
u32 unused)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 =
+ to_i915(intel_dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
u32 ret;
ret = DP_AUX_CH_CTL_SEND_BUSY |
@@ -1268,7 +1271,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
- if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
+ if (intel_phy_is_tc(i915, phy) &&
+ intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
ret |= DP_AUX_CH_CTL_TBT_IO;
return ret;
@@ -5436,6 +5440,12 @@ out:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
+ /*
+ * Make sure the refs for power wells enabled during detect are
+ * dropped to avoid a new detect cycle triggered by HPD polling.
+ */
+ intel_display_power_flush_work(dev_priv);
+
return status;
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index e02f0faecf02..b030f7ae3302 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -2565,6 +2565,12 @@ out:
if (status != connector_status_connected)
cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
+ /*
+ * Make sure the refs for power wells enabled during detect are
+ * dropped to avoid a new detect cycle triggered by HPD polling.
+ */
+ intel_display_power_flush_work(dev_priv);
+
return status;
}
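
The same three-line fix lands in the CRT, DP, and HDMI detect paths:
power-well references taken during detect are released through a delayed
worker, and if that work has not run by the time HPD polling samples the
domain, polling sees the well still enabled and kicks off another detect
cycle. Flushing the work before returning breaks the loop. A toy model of
the pattern (standalone C; the counters stand in for the real wakeref
machinery and are not the i915 API):

    #include <assert.h>

    static int power_refs;     /* references currently held on the well */
    static int deferred_puts;  /* puts queued to a worker, not yet applied */

    static void power_get(void)        { power_refs++; }
    static void power_put_async(void)  { deferred_puts++; }
    static void power_flush_work(void) { power_refs -= deferred_puts; deferred_puts = 0; }

    static int detect(void)
    {
        power_get();            /* power up for the probe */
        /* ... read EDID, sense load, etc. ... */
        power_put_async();      /* release is queued, not immediate */

        /*
         * Without this flush the domain can still look "on" when HPD
         * polling samples it, retriggering detect in a loop.
         */
        power_flush_work();
        return power_refs == 0; /* all refs really dropped */
    }

    int main(void)
    {
        assert(detect());
        return 0;
    }
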
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 1cdfe05514c3..e41fd94ae5a9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -319,6 +319,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
+ kfree(ctx->jump_whitelist);
+
if (ctx->timeline)
intel_timeline_put(ctx->timeline);
@@ -441,6 +443,9 @@ __create_context(struct drm_i915_private *i915)
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+ ctx->jump_whitelist = NULL;
+ ctx->jump_whitelist_cmds = 0;
+
return ctx;
err_free:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 260d59cc3de8..00537b9d7006 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -192,6 +192,13 @@ struct i915_gem_context {
* per vm, which may be one per context or shared with the global GTT)
*/
struct radix_tree_root handles_vma;
+
+ /** jump_whitelist: Bit array for tracking cmds during cmdparsing
+ * Guarded by struct_mutex
+ */
+ unsigned long *jump_whitelist;
+ /** jump_whitelist_cmds: Number of cmd slots available */
+ u32 jump_whitelist_cmds;
};
#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b5f6937369ea..e635e1e5f4d3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -296,7 +296,9 @@ static inline u64 gen8_noncanonical_addr(u64 address)
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
- return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
+ return intel_engine_requires_cmd_parser(eb->engine) ||
+ (intel_engine_using_cmd_parser(eb->engine) &&
+ eb->args->batch_len);
}
static int eb_create(struct i915_execbuffer *eb)
@@ -1955,40 +1957,94 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
return 0;
}
-static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
+static struct i915_vma *
+shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = eb->i915;
+ struct i915_vma * const vma = *eb->vma;
+ struct i915_address_space *vm;
+ u64 flags;
+
+ /*
+ * PPGTT-backed shadow buffers must be mapped RO, to prevent
+ * post-scan tampering.
+ */
+ if (CMDPARSER_USES_GGTT(dev_priv)) {
+ flags = PIN_GLOBAL;
+ vm = &dev_priv->ggtt.vm;
+ } else if (vma->vm->has_read_only) {
+ flags = PIN_USER;
+ vm = vma->vm;
+ i915_gem_object_set_readonly(obj);
+ } else {
+ DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
+}
+
+static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
{
struct intel_engine_pool_node *pool;
struct i915_vma *vma;
+ u64 batch_start;
+ u64 shadow_batch_start;
int err;
pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
if (IS_ERR(pool))
return ERR_CAST(pool);
- err = intel_engine_cmd_parser(eb->engine,
+ vma = shadow_batch_pin(eb, pool->obj);
+ if (IS_ERR(vma))
+ goto err;
+
+ batch_start = gen8_canonical_addr(eb->batch->node.start) +
+ eb->batch_start_offset;
+
+ shadow_batch_start = gen8_canonical_addr(vma->node.start);
+
+ err = intel_engine_cmd_parser(eb->gem_context,
+ eb->engine,
eb->batch->obj,
- pool->obj,
+ batch_start,
eb->batch_start_offset,
eb->batch_len,
- is_master);
+ pool->obj,
+ shadow_batch_start);
+
if (err) {
- if (err == -EACCES) /* unhandled chained batch */
+ i915_vma_unpin(vma);
+
+ /*
+ * Unsafe GGTT-backed buffers can still be submitted safely
+ * as non-secure.
+ * For PPGTT backing, however, we have no choice but to forcibly
+ * reject unsafe buffers.
+ */
+ if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
+ /* Execute original buffer non-secure */
vma = NULL;
else
vma = ERR_PTR(err);
goto err;
}
- vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
- if (IS_ERR(vma))
- goto err;
-
eb->vma[eb->buffer_count] = i915_vma_get(vma);
eb->flags[eb->buffer_count] =
__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
vma->exec_flags = &eb->flags[eb->buffer_count];
eb->buffer_count++;
+ eb->batch_start_offset = 0;
+ eb->batch = vma;
+
+ if (CMDPARSER_USES_GGTT(eb->i915))
+ eb->batch_flags |= I915_DISPATCH_SECURE;
+
+ /* eb->batch_len unchanged */
+
vma->private = pool;
return vma;
@@ -2421,6 +2477,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_i915_gem_exec_object2 *exec,
struct drm_syncobj **fences)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
struct dma_fence *exec_fence = NULL;
@@ -2432,7 +2489,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
~__EXEC_OBJECT_UNKNOWN_FLAGS);
- eb.i915 = to_i915(dev);
+ eb.i915 = i915;
eb.file = file;
eb.args = args;
if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
@@ -2452,8 +2509,15 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.batch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
+ if (INTEL_GEN(i915) >= 11)
+ return -ENODEV;
+
+ /* Return -EPERM to trigger fallback code on old binaries. */
+ if (!HAS_SECURE_BATCHES(i915))
+ return -EPERM;
+
if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
- return -EPERM;
+ return -EPERM;
eb.batch_flags |= I915_DISPATCH_SECURE;
}
@@ -2530,34 +2594,19 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
+ if (eb.batch_len == 0)
+ eb.batch_len = eb.batch->size - eb.batch_start_offset;
+
if (eb_use_cmdparser(&eb)) {
struct i915_vma *vma;
- vma = eb_parse(&eb, drm_is_current_master(file));
+ vma = eb_parse(&eb);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_vma;
}
-
- if (vma) {
- /*
- * Batch parsed and accepted:
- *
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE
- * bit from MI_BATCH_BUFFER_START commands issued in
- * the dispatch_execbuffer implementations. We
- * specifically don't want that set on batches the
- * command parser has accepted.
- */
- eb.batch_flags |= I915_DISPATCH_SECURE;
- eb.batch_start_offset = 0;
- eb.batch = vma;
- }
}
- if (eb.batch_len == 0)
- eb.batch_len = eb.batch->size - eb.batch_start_offset;
-
/*
* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index a82cea95c2f2..9dd8c299cb2d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -475,12 +475,13 @@ struct intel_engine_cs {
struct intel_engine_hangcheck hangcheck;
-#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_USING_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL BIT(5)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
unsigned int flags;
/*
@@ -541,9 +542,15 @@ struct intel_engine_cs {
};
static inline bool
-intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
+intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
- return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
+ return engine->flags & I915_ENGINE_USING_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}
static inline bool
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 1363e069ec83..fac75afed35b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -38,6 +38,9 @@ static int __gt_unpark(struct intel_wakeref *wf)
gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
GEM_BUG_ON(!gt->awake);
+ if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
+
intel_enable_gt_powersave(i915);
i915_update_gfx_val(i915);
@@ -67,6 +70,11 @@ static int __gt_park(struct intel_wakeref *wf)
if (INTEL_GEN(i915) >= 6)
gen6_rps_idle(i915);
+ if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) {
+ i915_rc6_ctx_wa_check(i915);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+ }
+
/* Everything switched off, flush any residual interrupt just in case */
intel_synchronize_irq(i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 728704bbbe18..cea184a7dde9 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -199,14 +199,6 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
MOCS_ENTRY(15, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
L3_3_WB), \
- /* Bypass LLC - Uncached (EHL+) */ \
- MOCS_ENTRY(16, \
- LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
- L3_1_UC), \
- /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
- MOCS_ENTRY(17, \
- LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
- L3_3_WB), \
/* Self-Snoop - L3 + LLC */ \
MOCS_ENTRY(18, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
@@ -270,7 +262,7 @@ static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
L3_1_UC),
/* HW Special Case (Displayable) */
MOCS_ENTRY(61,
- LE_1_UC | LE_TC_1_LLC | LE_SCF(1),
+ LE_1_UC | LE_TC_1_LLC,
L3_3_WB),
};
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 13044c027f27..4bfaefdf548d 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -498,8 +498,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
goto out_free_gem;
}
- i915_gem_object_put(obj);
-
ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
if (ret < 0) {
gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
@@ -524,6 +522,8 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
file_count(dmabuf->file),
kref_read(&obj->base.refcount));
+ i915_gem_object_put(obj);
+
return dmabuf_fd;
out_free_dmabuf:
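
The gvt dmabuf hunk is a use-after-free fix: i915_gem_object_put() ran while
a later debug print still dereferenced obj, so the put had to move below the
last use. The ordering rule in standalone form (plain C with a toy refcount,
not the kernel's kref):

    #include <assert.h>
    #include <stdlib.h>

    struct obj { int refs; };

    static struct obj *obj_get(void)
    {
        struct obj *o = calloc(1, sizeof(*o));
        o->refs = 1;
        return o;
    }

    static void obj_put(struct obj *o)
    {
        if (--o->refs == 0)
            free(o);   /* the object may vanish here */
    }

    int main(void)
    {
        struct obj *o = obj_get();
        int refs_seen = o->refs; /* last read of *o */

        /* Drop the reference only after the final dereference. */
        obj_put(o);

        assert(refs_seen == 1);
        return 0;
    }
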
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 24555102e198..f24096e27bef 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -53,13 +53,11 @@
* granting userspace undue privileges. There are three categories of privilege.
*
* First, commands which are explicitly defined as privileged or which should
- * only be used by the kernel driver. The parser generally rejects such
- * commands, though it may allow some from the drm master process.
+ * only be used by the kernel driver. The parser rejects such commands.
*
* Second, commands which access registers. To support correct/enhanced
* userspace functionality, particularly certain OpenGL extensions, the parser
- * provides a whitelist of registers which userspace may safely access (for both
- * normal and drm master processes).
+ * provides a whitelist of registers which userspace may safely access.
*
* Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
* The parser always rejects such commands.
@@ -84,9 +82,9 @@
* in the per-engine command tables.
*
* Other command table entries map fairly directly to high level categories
- * mentioned above: rejected, master-only, register whitelist. The parser
- * implements a number of checks, including the privileged memory checks, via a
- * general bitmasking mechanism.
+ * mentioned above: rejected, register whitelist. The parser implements a number
+ * of checks, including the privileged memory checks, via a general bitmasking
+ * mechanism.
*/
/*
@@ -104,8 +102,6 @@ struct drm_i915_cmd_descriptor {
* CMD_DESC_REJECT: The command is never allowed
* CMD_DESC_REGISTER: The command should be checked against the
* register whitelist for the appropriate ring
- * CMD_DESC_MASTER: The command is allowed if the submitting process
- * is the DRM master
*/
u32 flags;
#define CMD_DESC_FIXED (1<<0)
@@ -113,7 +109,6 @@ struct drm_i915_cmd_descriptor {
#define CMD_DESC_REJECT (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK (1<<4)
-#define CMD_DESC_MASTER (1<<5)
/*
* The command's unique identification bits and the bitmask to get them.
@@ -194,7 +189,7 @@ struct drm_i915_cmd_table {
#define CMD(op, opm, f, lm, fl, ...) \
{ \
.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
- .cmd = { (op), ~0u << (opm) }, \
+ .cmd = { (op & ~0u << (opm)), ~0u << (opm) }, \
.length = { (lm) }, \
__VA_ARGS__ \
}
@@ -209,14 +204,13 @@ struct drm_i915_cmd_table {
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
-#define M CMD_DESC_MASTER
/* Command Mask Fixed Len Action
---------------------------------------------------------- */
-static const struct drm_i915_cmd_descriptor common_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
CMD( MI_NOOP, SMI, F, 1, S ),
CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
- CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
+ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ),
CMD( MI_ARB_CHECK, SMI, F, 1, S ),
CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
@@ -246,7 +240,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};
-static const struct drm_i915_cmd_descriptor render_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
CMD( MI_FLUSH, SMI, F, 1, S ),
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_PREDICATE, SMI, F, 1, S ),
@@ -313,7 +307,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
- CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
@@ -330,7 +324,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
};
-static const struct drm_i915_cmd_descriptor video_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
@@ -374,7 +368,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
CMD( MFX_WAIT, SMFX, F, 1, S ),
};
-static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
@@ -412,7 +406,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
}}, ),
};
-static const struct drm_i915_cmd_descriptor blt_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
.bits = {{
@@ -446,10 +440,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = {
};
static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
- CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
};
+/*
+ * For Gen9 we can still rely on the h/w to enforce cmd security, and only
+ * need to re-enforce the register access checks. We therefore only need to
+ * teach the cmdparser how to find the end of each command, and identify
+ * register accesses. The table doesn't need to reject any commands, and so
+ * the only commands listed here are:
+ * 1) Those that touch registers
+ * 2) Those that do not have the default 8-bit length
+ *
+ * Note that the default MI length mask chosen for this table is 0xFF, not
+ * the 0x3F used on older devices. This is because the vast majority of MI
+ * cmds on Gen9 use a standard 8-bit Length field.
+ * All the Gen9 blitter instructions use the standard 0xFF length mask, and
+ * none of them allow access to non-general registers, so in fact no BLT
+ * cmds are included in the table at all.
+ */
+static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
+ CMD( MI_NOOP, SMI, F, 1, S ),
+ CMD( MI_USER_INTERRUPT, SMI, F, 1, S ),
+ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ),
+ CMD( MI_FLUSH, SMI, F, 1, S ),
+ CMD( MI_ARB_CHECK, SMI, F, 1, S ),
+ CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
+ CMD( MI_ARB_ON_OFF, SMI, F, 1, S ),
+ CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ),
+ CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ),
+ CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
+ CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ),
+ CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ),
+ CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
+
+ /*
+ * We allow BB_START but apply further checks. We just sanitize the
+ * basic fields here.
+ */
+#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0)
+#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
+ CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_BB_START_OPERAND_MASK,
+ .expected = MI_BB_START_OPERAND_EXPECT,
+ }}, ),
+};
+
static const struct drm_i915_cmd_descriptor noop_desc =
CMD(MI_NOOP, SMI, F, 1, S);
@@ -463,40 +511,44 @@ static const struct drm_i915_cmd_descriptor noop_desc =
#undef R
#undef W
#undef B
-#undef M
-static const struct drm_i915_cmd_table gen7_render_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
};
-static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};
-static const struct drm_i915_cmd_table gen7_video_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { video_cmds, ARRAY_SIZE(video_cmds) },
+static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
};
-static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
+static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
};
-static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
};
-static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};
+static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
+ { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
+};
+
/*
* Register whitelists, sorted by increasing register offset.
*/
@@ -612,17 +664,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
};
-static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
- REG32(FORCEWAKE_MT),
- REG32(DERRMR),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
-};
-
-static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
- REG32(FORCEWAKE_MT),
- REG32(DERRMR),
+static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
+ REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
+ REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
+ REG32(BCS_SWCTRL),
+ REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
+ REG64_IDX(BCS_GPR, 0),
+ REG64_IDX(BCS_GPR, 1),
+ REG64_IDX(BCS_GPR, 2),
+ REG64_IDX(BCS_GPR, 3),
+ REG64_IDX(BCS_GPR, 4),
+ REG64_IDX(BCS_GPR, 5),
+ REG64_IDX(BCS_GPR, 6),
+ REG64_IDX(BCS_GPR, 7),
+ REG64_IDX(BCS_GPR, 8),
+ REG64_IDX(BCS_GPR, 9),
+ REG64_IDX(BCS_GPR, 10),
+ REG64_IDX(BCS_GPR, 11),
+ REG64_IDX(BCS_GPR, 12),
+ REG64_IDX(BCS_GPR, 13),
+ REG64_IDX(BCS_GPR, 14),
+ REG64_IDX(BCS_GPR, 15),
};
#undef REG64
@@ -631,28 +693,27 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
struct drm_i915_reg_table {
const struct drm_i915_reg_descriptor *regs;
int num_regs;
- bool master;
};
static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
- { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
- { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
};
static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
- { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
- { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
};
static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
- { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
- { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
- { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
+ { hsw_render_regs, ARRAY_SIZE(hsw_render_regs) },
};
static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
- { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
- { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
+};
+
+static const struct drm_i915_reg_table gen9_blt_reg_tables[] = {
+ { gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) },
};
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
@@ -710,6 +771,17 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
return 0;
}
+static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
+{
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
+
+ if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
+ return 0xFF;
+
+ DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+ return 0;
+}
+
static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
@@ -867,18 +939,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
int cmd_table_count;
int ret;
- if (!IS_GEN(engine->i915, 7))
+ if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
+ engine->class == COPY_ENGINE_CLASS))
return;
switch (engine->class) {
case RENDER_CLASS:
if (IS_HASWELL(engine->i915)) {
- cmd_tables = hsw_render_ring_cmds;
+ cmd_tables = hsw_render_ring_cmd_table;
cmd_table_count =
- ARRAY_SIZE(hsw_render_ring_cmds);
+ ARRAY_SIZE(hsw_render_ring_cmd_table);
} else {
- cmd_tables = gen7_render_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
+ cmd_tables = gen7_render_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
}
if (IS_HASWELL(engine->i915)) {
@@ -888,36 +961,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
engine->reg_tables = ivb_render_reg_tables;
engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
}
-
engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
case VIDEO_DECODE_CLASS:
- cmd_tables = gen7_video_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
+ cmd_tables = gen7_video_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case COPY_ENGINE_CLASS:
- if (IS_HASWELL(engine->i915)) {
- cmd_tables = hsw_blt_ring_cmds;
- cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
+ engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+ if (IS_GEN(engine->i915, 9)) {
+ cmd_tables = gen9_blt_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
+ engine->get_cmd_length_mask =
+ gen9_blt_get_cmd_length_mask;
+
+ /* BCS Engine unsafe without parser */
+ engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER;
+ } else if (IS_HASWELL(engine->i915)) {
+ cmd_tables = hsw_blt_ring_cmd_table;
+ cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
} else {
- cmd_tables = gen7_blt_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
+ cmd_tables = gen7_blt_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
}
- if (IS_HASWELL(engine->i915)) {
+ if (IS_GEN(engine->i915, 9)) {
+ engine->reg_tables = gen9_blt_reg_tables;
+ engine->reg_table_count =
+ ARRAY_SIZE(gen9_blt_reg_tables);
+ } else if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else {
engine->reg_tables = ivb_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
}
-
- engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
case VIDEO_ENHANCEMENT_CLASS:
- cmd_tables = hsw_vebox_cmds;
- cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
+ cmd_tables = hsw_vebox_cmd_table;
+ cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
/* VECS can use the same length_mask function as VCS */
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
@@ -943,7 +1026,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
return;
}
- engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER;
+ engine->flags |= I915_ENGINE_USING_CMD_PARSER;
}
/**
@@ -955,7 +1038,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
*/
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
{
- if (!intel_engine_needs_cmd_parser(engine))
+ if (!intel_engine_using_cmd_parser(engine))
return;
fini_hash_table(engine);
@@ -1029,22 +1112,16 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
}
static const struct drm_i915_reg_descriptor *
-find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
+find_reg(const struct intel_engine_cs *engine, u32 addr)
{
const struct drm_i915_reg_table *table = engine->reg_tables;
+ const struct drm_i915_reg_descriptor *reg = NULL;
int count = engine->reg_table_count;
- for (; count > 0; ++table, --count) {
- if (!table->master || is_master) {
- const struct drm_i915_reg_descriptor *reg;
+ for (; !reg && (count > 0); ++table, --count)
+ reg = __find_reg(table->regs, table->num_regs, addr);
- reg = __find_reg(table->regs, table->num_regs, addr);
- if (reg != NULL)
- return reg;
- }
- }
-
- return NULL;
+ return reg;
}
/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
@@ -1128,8 +1205,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
static bool check_cmd(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_descriptor *desc,
- const u32 *cmd, u32 length,
- const bool is_master)
+ const u32 *cmd, u32 length)
{
if (desc->flags & CMD_DESC_SKIP)
return true;
@@ -1139,12 +1215,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return false;
}
- if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
- DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
- *cmd);
- return false;
- }
-
if (desc->flags & CMD_DESC_REGISTER) {
/*
* Get the distance between individual register offset
@@ -1158,7 +1228,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
offset += step) {
const u32 reg_addr = cmd[offset] & desc->reg.mask;
const struct drm_i915_reg_descriptor *reg =
- find_reg(engine, is_master, reg_addr);
+ find_reg(engine, reg_addr);
if (!reg) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
@@ -1236,16 +1306,112 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return true;
}
+static int check_bbstart(const struct i915_gem_context *ctx,
+ u32 *cmd, u32 offset, u32 length,
+ u32 batch_len,
+ u64 batch_start,
+ u64 shadow_batch_start)
+{
+ u64 jump_offset, jump_target;
+ u32 target_cmd_offset, target_cmd_index;
+
+ /* For igt compatibility on older platforms */
+ if (CMDPARSER_USES_GGTT(ctx->i915)) {
+ DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
+ return -EACCES;
+ }
+
+ if (length != 3) {
+ DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
+ length);
+ return -EINVAL;
+ }
+
+ jump_target = *(u64*)(cmd+1);
+ jump_offset = jump_target - batch_start;
+
+ /*
+ * If jump_target lies below batch_start, jump_offset wraps to a value
+ * far outside the range of a u32, so the one >= test catches both too
+ * large and too small.
+ */
+ if (jump_offset >= batch_len) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
+ jump_target);
+ return -EINVAL;
+ }
+
+ /*
+ * This cannot overflow a u32 because we already checked jump_offset
+ * is within the BB, and the batch_len is a u32
+ */
+ target_cmd_offset = lower_32_bits(jump_offset);
+ target_cmd_index = target_cmd_offset / sizeof(u32);
+
+ *(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
+
+ if (target_cmd_index == offset)
+ return 0;
+
+ if (ctx->jump_whitelist_cmds <= target_cmd_index) {
+ DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
+ return -EINVAL;
+ } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
+ jump_target);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
+{
+ const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
+ const u32 exact_size = BITS_TO_LONGS(batch_cmds);
+ u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
+ unsigned long *next_whitelist;
+
+ if (CMDPARSER_USES_GGTT(ctx->i915))
+ return;
+
+ if (batch_cmds <= ctx->jump_whitelist_cmds) {
+ bitmap_zero(ctx->jump_whitelist, batch_cmds);
+ return;
+ }
+
+again:
+ next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
+ if (next_whitelist) {
+ kfree(ctx->jump_whitelist);
+ ctx->jump_whitelist = next_whitelist;
+ ctx->jump_whitelist_cmds =
+ next_size * BITS_PER_BYTE * sizeof(long);
+ return;
+ }
+
+ if (next_size > exact_size) {
+ next_size = exact_size;
+ goto again;
+ }
+
+ DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
+ bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds);
+
+ return;
+}
+
#define LENGTH_BIAS 2
/**
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ctx: the context in which the batch is to execute
* @engine: the engine on which the batch is to execute
* @batch_obj: the batch buffer in question
- * @shadow_batch_obj: copy of the batch buffer in question
+ * @batch_start: Canonical base address of batch
* @batch_start_offset: byte offset in the batch at which execution starts
* @batch_len: length of the commands in batch_obj
- * @is_master: is the submitting process the drm master?
+ * @shadow_batch_obj: copy of the batch buffer in question
+ * @shadow_batch_start: Canonical base address of shadow_batch_obj
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
@@ -1253,14 +1419,17 @@ static bool check_cmd(const struct intel_engine_cs *engine,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+int intel_engine_cmd_parser(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
+ u64 batch_start,
u32 batch_start_offset,
u32 batch_len,
- bool is_master)
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u64 shadow_batch_start)
{
- u32 *cmd, *batch_end;
+ u32 *cmd, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
const struct drm_i915_cmd_descriptor *desc = &default_desc;
bool needs_clflush_after = false;
@@ -1274,6 +1443,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
return PTR_ERR(cmd);
}
+ init_whitelist(ctx, batch_len);
+
/*
* We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra
@@ -1283,31 +1454,15 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
do {
u32 length;
- if (*cmd == MI_BATCH_BUFFER_END) {
- if (needs_clflush_after) {
- void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
- drm_clflush_virt_range(ptr,
- (void *)(cmd + 1) - ptr);
- }
+ if (*cmd == MI_BATCH_BUFFER_END)
break;
- }
desc = find_cmd(engine, *cmd, desc, &default_desc);
if (!desc) {
DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
*cmd);
ret = -EINVAL;
- break;
- }
-
- /*
- * If the batch buffer contains a chained batch, return an
- * error that tells the caller to abort and dispatch the
- * workload as a non-secure batch.
- */
- if (desc->cmd.value == MI_BATCH_BUFFER_START) {
- ret = -EACCES;
- break;
+ goto err;
}
if (desc->flags & CMD_DESC_FIXED)
@@ -1321,22 +1476,43 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
length,
batch_end - cmd);
ret = -EINVAL;
- break;
+ goto err;
}
- if (!check_cmd(engine, desc, cmd, length, is_master)) {
+ if (!check_cmd(engine, desc, cmd, length)) {
ret = -EACCES;
+ goto err;
+ }
+
+ if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+ ret = check_bbstart(ctx, cmd, offset, length,
+ batch_len, batch_start,
+ shadow_batch_start);
+
+ if (ret)
+ goto err;
break;
}
+ if (ctx->jump_whitelist_cmds > offset)
+ set_bit(offset, ctx->jump_whitelist);
+
cmd += length;
+ offset += length;
if (cmd >= batch_end) {
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
- break;
+ goto err;
}
} while (1);
+ if (needs_clflush_after) {
+ void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
+
+ drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
+ }
+
+err:
i915_gem_object_unpin_map(shadow_batch_obj);
return ret;
}
@@ -1357,7 +1533,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
/* If the command parser is not enabled, report 0 - unsupported */
for_each_uabi_engine(engine, dev_priv) {
- if (intel_engine_needs_cmd_parser(engine)) {
+ if (intel_engine_using_cmd_parser(engine)) {
active = true;
break;
}
@@ -1382,6 +1558,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
* the parser enabled.
* 9. Don't whitelist or handle oacontrol specially, as ownership
* for oacontrol state is moving to i915-perf.
+ * 10. Support for Gen9 BCS Parsing
*/
- return 9;
+ return 10;
}
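
The heart of check_bbstart() above is a single unsigned comparison:
jump_offset is computed as a u64 subtraction, so a target below batch_start
wraps to a huge value and one ">= batch_len" test rejects both directions of
out-of-range jump. The same check restated in standalone C (simplified
types, same logic):

    #include <assert.h>
    #include <stdint.h>

    static int jump_in_batch(uint64_t target, uint64_t start, uint32_t len)
    {
        uint64_t offset = target - start; /* wraps on purpose if target < start */

        return offset < len;
    }

    int main(void)
    {
        uint64_t start = 0x10000;
        uint32_t len = 0x1000;

        assert(jump_in_batch(start, start, len));         /* first dword */
        assert(jump_in_batch(start + 0xFFF, start, len)); /* last byte   */
        assert(!jump_in_batch(start + len, start, len));  /* one past    */
        assert(!jump_in_batch(start - 4, start, len));    /* underflow   */
        return 0;
    }
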
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index bb6f86c7067a..3d717e282908 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -364,9 +364,6 @@ static int i915_driver_modeset_probe(struct drm_device *dev)
if (ret)
goto cleanup_vga_client;
- /* must happen before intel_power_domains_init_hw() on VLV/CHV */
- intel_update_rawclk(dev_priv);
-
intel_power_domains_init_hw(dev_priv, false);
intel_csr_ucode_init(dev_priv);
@@ -1850,6 +1847,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
i915_gem_suspend_late(dev_priv);
+ i915_rc6_ctx_wa_suspend(dev_priv);
+
intel_uncore_suspend(&dev_priv->uncore);
intel_power_domains_suspend(dev_priv,
@@ -2053,6 +2052,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_power_domains_resume(dev_priv);
+ i915_rc6_ctx_wa_resume(dev_priv);
+
intel_gt_sanitize(&dev_priv->gt, true);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 953e1d12c23c..89b6112bd66b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -593,6 +593,8 @@ struct intel_rps {
struct intel_rc6 {
bool enabled;
+ bool ctx_corrupted;
+ intel_wakeref_t ctx_corrupted_wakeref;
u64 prev_hw_residency[4];
u64 cur_residency[4];
};
@@ -2075,9 +2077,16 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define VEBOX_MASK(dev_priv) \
ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
+/*
+ * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
+ * All later gens can run the final buffer from the ppgtt.
+ */
+#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
+
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
+#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
@@ -2110,10 +2119,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
+#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
+ (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
+
/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
- (IS_CANNONLAKE(dev_priv) || \
- IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
+ (IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9))
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
@@ -2284,6 +2295,14 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
+struct i915_vma * __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view,
+ u64 size,
+ u64 alignment,
+ u64 flags);
+
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
static inline int __must_check
@@ -2393,12 +2412,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+int intel_engine_cmd_parser(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
+ u64 user_batch_start,
u32 batch_start_offset,
u32 batch_len,
- bool is_master);
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u64 shadow_batch_start);
/* intel_device_info.c */
static inline struct intel_device_info *
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d0f94f239919..98305d987ac1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -964,6 +964,20 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_address_space *vm = &dev_priv->ggtt.vm;
+
+ return i915_gem_object_pin(obj, vm, view, size, alignment,
+ flags | PIN_GLOBAL);
+}
+
+struct i915_vma *
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view,
+ u64 size,
+ u64 alignment,
+ u64 flags)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
int ret;
@@ -1038,7 +1052,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(ret);
}
- ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+ ret = i915_vma_pin(vma, size, alignment, flags);
if (ret)
return ERR_PTR(ret);
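
This hunk splits the old GGTT-only pin helper in two: a generic
i915_gem_object_pin() that takes an explicit address space, plus a GGTT
wrapper that supplies the global GTT and ORs in PIN_GLOBAL. That is what
lets the cmdparser pin shadow batches into a ppgtt instead. The shape of the
refactor (standalone C; the flag values are placeholders):

    #include <assert.h>

    #define PIN_USER   (1u << 0)
    #define PIN_GLOBAL (1u << 1)

    static unsigned int last_flags;

    /* Generic pin: the caller picks the address space and flags. */
    static int object_pin(const char *vm, unsigned int flags)
    {
        (void)vm;
        last_flags = flags;
        return 0;
    }

    /* GGTT wrapper: always the global GTT, always PIN_GLOBAL. */
    static int object_ggtt_pin(unsigned int flags)
    {
        return object_pin("ggtt", flags | PIN_GLOBAL);
    }

    int main(void)
    {
        object_ggtt_pin(0);
        assert(last_flags & PIN_GLOBAL);    /* wrapper adds the global bit */

        object_pin("ppgtt", PIN_USER);
        assert(!(last_flags & PIN_GLOBAL)); /* generic path leaves it alone */
        return 0;
    }
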
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 5d9101376a3d..9f1517af5b7f 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -62,7 +62,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
break;
case I915_PARAM_HAS_SECURE_BATCHES:
- value = capable(CAP_SYS_ADMIN);
+ value = HAS_SECURE_BATCHES(i915) && capable(CAP_SYS_ADMIN);
break;
case I915_PARAM_CMD_PARSER_VERSION:
value = i915_cmd_parser_get_version(i915);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2abd199093c5..f8ee9aba3955 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -471,6 +471,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
+#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
+
#define GAC_ECO_BITS _MMIO(0x14090)
#define ECOBITS_SNB_BIT (1 << 13)
#define ECOBITS_PPGTT_CACHE64B (3 << 8)
@@ -555,6 +557,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
*/
#define BCS_SWCTRL _MMIO(0x22200)
+/* There are 16 GPR registers */
+#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8)
+#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4)
+
#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
#define HS_INVOCATION_COUNT _MMIO(0x2300)
@@ -7211,6 +7217,10 @@ enum {
#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
+/* Display Internal Timeout Register */
+#define RM_TIMEOUT _MMIO(0x42060)
+#define MMIO_TIMEOUT_US(us) ((us) << 0)
+
/* interrupts */
#define DE_MASTER_IRQ_CONTROL (1 << 31)
#define DE_SPRITEB_FLIP_DONE (1 << 29)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 75ee027abb80..2efe1d12d5a9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -126,6 +126,14 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
*/
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
+
+ /*
+ * Lower the display internal timeout.
+ * This is needed to avoid any hard hangs when DSI port PLL
+ * is off and an MMIO access is attempted by any privileged
+ * application, using batch buffers or any other means.
+ */
+ I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -8544,6 +8552,100 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
+static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv)
+{
+ return !I915_READ(GEN8_RC6_CTX_INFO);
+}
+
+static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915)
+{
+ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ return;
+
+ if (i915_rc6_ctx_corrupted(i915)) {
+ DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
+ i915->gt_pm.rc6.ctx_corrupted = true;
+ i915->gt_pm.rc6.ctx_corrupted_wakeref =
+ intel_runtime_pm_get(&i915->runtime_pm);
+ }
+}
+
+static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915)
+{
+ if (i915->gt_pm.rc6.ctx_corrupted) {
+ intel_runtime_pm_put(&i915->runtime_pm,
+ i915->gt_pm.rc6.ctx_corrupted_wakeref);
+ i915->gt_pm.rc6.ctx_corrupted = false;
+ }
+}
+
+/**
+ * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA
+ * @i915: i915 device
+ *
+ * Perform any steps needed to clean up the RC6 CTX WA before system suspend.
+ */
+void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915)
+{
+ if (i915->gt_pm.rc6.ctx_corrupted)
+ intel_runtime_pm_put(&i915->runtime_pm,
+ i915->gt_pm.rc6.ctx_corrupted_wakeref);
+}
+
+/**
+ * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
+ * @i915: i915 device
+ *
+ * Perform any steps needed to re-init the RC6 CTX WA after system resume.
+ */
+void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915)
+{
+ if (!i915->gt_pm.rc6.ctx_corrupted)
+ return;
+
+ if (i915_rc6_ctx_corrupted(i915)) {
+ i915->gt_pm.rc6.ctx_corrupted_wakeref =
+ intel_runtime_pm_get(&i915->runtime_pm);
+ return;
+ }
+
+ DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
+ i915->gt_pm.rc6.ctx_corrupted = false;
+}
+
+static void intel_disable_rc6(struct drm_i915_private *dev_priv);
+
+/**
+ * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption
+ * @i915: i915 device
+ *
+ * Check if an RC6 CTX corruption has happened since the last check and if so
+ * disable RC6 and runtime power management.
+ *
+ * Return: false if no context corruption has happened since the last call
+ * to this function, true otherwise.
+ */
+bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915)
+{
+ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ return false;
+
+ if (i915->gt_pm.rc6.ctx_corrupted)
+ return false;
+
+ if (!i915_rc6_ctx_corrupted(i915))
+ return false;
+
+ DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
+
+ intel_disable_rc6(i915);
+ i915->gt_pm.rc6.ctx_corrupted = true;
+ i915->gt_pm.rc6.ctx_corrupted_wakeref =
+ intel_runtime_pm_get_noresume(&i915->runtime_pm);
+
+ return true;
+}
+
void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
@@ -8557,6 +8659,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
pm_runtime_get(&dev_priv->drm.pdev->dev);
}
+ i915_rc6_ctx_wa_init(dev_priv);
+
/* Initialize RPS limits (for userspace) */
if (IS_CHERRYVIEW(dev_priv))
cherryview_init_gt_powersave(dev_priv);
@@ -8595,6 +8699,8 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv))
valleyview_cleanup_gt_powersave(dev_priv);
+ i915_rc6_ctx_wa_cleanup(dev_priv);
+
if (!HAS_RC6(dev_priv))
pm_runtime_put(&dev_priv->drm.pdev->dev);
}
@@ -8623,7 +8729,7 @@ static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
i915->gt_pm.llc_pstate.enabled = false;
}
-static void intel_disable_rc6(struct drm_i915_private *dev_priv)
+static void __intel_disable_rc6(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
@@ -8642,6 +8748,15 @@ static void intel_disable_rc6(struct drm_i915_private *dev_priv)
dev_priv->gt_pm.rc6.enabled = false;
}
+static void intel_disable_rc6(struct drm_i915_private *dev_priv)
+{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ mutex_lock(&rps->lock);
+ __intel_disable_rc6(dev_priv);
+ mutex_unlock(&rps->lock);
+}
+
static void intel_disable_rps(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
@@ -8667,7 +8782,7 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->gt_pm.rps.lock);
- intel_disable_rc6(dev_priv);
+ __intel_disable_rc6(dev_priv);
intel_disable_rps(dev_priv);
if (HAS_LLC(dev_priv))
intel_disable_llc_pstate(dev_priv);
@@ -8694,6 +8809,9 @@ static void intel_enable_rc6(struct drm_i915_private *dev_priv)
if (dev_priv->gt_pm.rc6.enabled)
return;
+ if (dev_priv->gt_pm.rc6.ctx_corrupted)
+ return;
+
if (IS_CHERRYVIEW(dev_priv))
cherryview_enable_rc6(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
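
The RC6 workaround added above is a small state machine: GEN8_RC6_CTX_INFO
reading as zero means the RC6 context was corrupted, at which point RC6 is
disabled and a runtime-PM wakeref is held so the device never power-gates
into the bad state again; suspend and resume drop and re-take that wakeref.
A toy model of the check (standalone C; the fields stand in for the real
register read and wakeref):

    #include <assert.h>

    struct rc6_wa {
        int hw_ctx_valid;  /* stands in for GEN8_RC6_CTX_INFO != 0 */
        int rc6_enabled;
        int wakeref_held;  /* blocks runtime PM while corrupted */
    };

    static int rc6_ctx_wa_check(struct rc6_wa *wa)
    {
        if (wa->wakeref_held || wa->hw_ctx_valid)
            return 0;          /* already handled, or still healthy */

        wa->rc6_enabled = 0;   /* intel_disable_rc6() in the driver */
        wa->wakeref_held = 1;  /* pin the device awake */
        return 1;
    }

    int main(void)
    {
        struct rc6_wa wa = { .hw_ctx_valid = 1, .rc6_enabled = 1 };

        assert(rc6_ctx_wa_check(&wa) == 0); /* healthy: no action */
        wa.hw_ctx_valid = 0;                /* corruption detected */
        assert(rc6_ctx_wa_check(&wa) == 1); /* RC6 off, wakeref held */
        assert(rc6_ctx_wa_check(&wa) == 0); /* idempotent afterwards */
        return 0;
    }
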
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index e3573e1e16e3..0f7390c850ec 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -36,6 +36,9 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
+bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915);
+void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915);
+void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct i915_request *rq);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 460fd98e40a7..a0b382a637a6 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -1958,6 +1958,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
case 0x682C:
si_pi->cac_weights = cac_weights_cape_verde_pro;
si_pi->dte_data = dte_data_sun_xt;
+ update_dte_from_pl2 = true;
break;
case 0x6825:
case 0x6827:
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 04c721d0d3b9..b89439ed210d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -488,7 +488,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
WARN_ON(!tcon->quirks->has_channel_0);
- tcon->dclk_min_div = 6;
+ tcon->dclk_min_div = 1;
tcon->dclk_max_div = 127;
sun4i_tcon0_mode_set_common(tcon, mode);
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index d9c55e30f986..04c088131e04 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -447,8 +447,12 @@ static int i2c_hid_hwreset(struct i2c_client *client)
if (ret) {
dev_err(&client->dev, "failed to reset device.\n");
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+ goto out_unlock;
}
+ /* At least some SIS devices need this after reset */
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+
out_unlock:
mutex_unlock(&ihid->reset_lock);
return ret;
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 4a7f8d363220..203d27d198b8 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -202,6 +202,21 @@ static inline void wacom_schedule_work(struct wacom_wac *wacom_wac,
}
}
+/*
+ * Convert a signed 32-bit integer to an unsigned n-bit integer. Undoes
+ * the normally-helpful work of 'hid_snto32' for fields that use signed
+ * ranges for questionable reasons.
+ */
+static inline __u32 wacom_s32tou(s32 value, __u8 n)
+{
+ switch (n) {
+ case 8: return ((__u8)value);
+ case 16: return ((__u16)value);
+ case 32: return ((__u32)value);
+ }
+ return value & (1 << (n - 1)) ? value & (~(~0U << n)) : value;
+}
+
extern const struct hid_device_id wacom_ids[];
void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
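
wacom_s32tou() above undoes the sign extension hid_snto32() applies, so the
serial-number and tool-ID fields can be reassembled from their raw n-bit
values. The same logic in standalone form with a few worked values (plain C,
stdint types standing in for the kernel's __u8/__u16/__u32):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t s32tou(int32_t value, uint8_t n)
    {
        switch (n) {
        case 8:  return (uint8_t)value;
        case 16: return (uint16_t)value;
        case 32: return (uint32_t)value;
        }
        return value & (1 << (n - 1)) ? value & (~(~0U << n)) : value;
    }

    int main(void)
    {
        /* A 16-bit field reported as -1 is really the raw value 0xFFFF. */
        assert(s32tou(-1, 16) == 0xFFFF);
        /* A sign-extended 20-bit field recovers its raw 20 bits. */
        assert(s32tou(-1, 20) == 0xFFFFF);
        /* Positive values pass through unchanged. */
        assert(s32tou(0x7FFFF, 20) == 0x7FFFF);
        return 0;
    }
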
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 2b0a5b8ca6e6..ccb74529bc78 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2303,7 +2303,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
case HID_DG_TOOLSERIALNUMBER:
if (value) {
wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
- wacom_wac->serial[0] |= (__u32)value;
+ wacom_wac->serial[0] |= wacom_s32tou(value, field->report_size);
}
return;
case HID_DG_TWIST:
@@ -2319,15 +2319,17 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
return;
case WACOM_HID_WD_SERIALHI:
if (value) {
+ __u32 raw_value = wacom_s32tou(value, field->report_size);
+
wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
- wacom_wac->serial[0] |= ((__u64)value) << 32;
+ wacom_wac->serial[0] |= ((__u64)raw_value) << 32;
/*
* Non-USI EMR devices may contain additional tool type
* information here. See WACOM_HID_WD_TOOLTYPE case for
* more details.
*/
if (value >> 20 == 1) {
- wacom_wac->id[0] |= value & 0xFFFFF;
+ wacom_wac->id[0] |= raw_value & 0xFFFFF;
}
}
return;
@@ -2339,7 +2341,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
* bitwise OR so the complete value can be built
* up over time :(
*/
- wacom_wac->id[0] |= value;
+ wacom_wac->id[0] |= wacom_s32tou(value, field->report_size);
return;
case WACOM_HID_WD_OFFSETLEFT:
if (features->offset_left && value != features->offset_left)
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 0037e2bdacd6..8a51dcf055ea 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -170,7 +170,7 @@ static inline int ina3221_wait_for_data(struct ina3221_data *ina)
/* Polling the CVRF bit to make sure read data is ready */
return regmap_field_read_poll_timeout(ina->fields[F_CVRF],
- cvrf, cvrf, wait, 100000);
+ cvrf, cvrf, wait, wait * 2);
}
static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index b26419dbe840..281c81edabc6 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -82,6 +82,10 @@
#define FANCTL1_FMR_REG 0x00 /* Bank 3; 1 reg per channel */
#define FANCTL1_OUT_REG 0x10 /* Bank 3; 1 reg per channel */
+#define VOLT_MONITOR_MODE 0x0
+#define THERMAL_DIODE_MODE 0x1
+#define THERMISTOR_MODE 0x3
+
#define ENABLE_TSI BIT(1)
static const unsigned short normal_i2c[] = {
@@ -935,11 +939,16 @@ static int nct7904_probe(struct i2c_client *client,
for (i = 0; i < 4; i++) {
val = (ret >> (i * 2)) & 0x03;
bit = (1 << i);
- if (val == 0) {
+ if (val == VOLT_MONITOR_MODE) {
data->tcpu_mask &= ~bit;
+ } else if (val == THERMAL_DIODE_MODE && i < 2) {
+ data->temp_mode |= bit;
+ data->vsen_mask &= ~(0x06 << (i * 2));
+ } else if (val == THERMISTOR_MODE) {
+ data->vsen_mask &= ~(0x02 << (i * 2));
} else {
- if (val == 0x1 || val == 0x2)
- data->temp_mode |= bit;
+ /* Reserved */
+ data->tcpu_mask &= ~bit;
data->vsen_mask &= ~(0x06 << (i * 2));
}
}
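Each channel's mode occupies two bits of the configuration register, and the thermal-diode encoding is only meaningful on the first two channels, which is what the i < 2 guard enforces. A small userspace sketch of the decode (register value invented for the demo):

#include <stdint.h>
#include <stdio.h>

#define VOLT_MONITOR_MODE  0x0
#define THERMAL_DIODE_MODE 0x1
#define THERMISTOR_MODE    0x3

int main(void)
{
	uint8_t ret = 0xD4;	/* hypothetical register: channels 3..0 = 11 01 01 00 */
	int i;

	for (i = 0; i < 4; i++) {
		int val = (ret >> (i * 2)) & 0x03;

		if (val == VOLT_MONITOR_MODE)
			printf("channel %d: voltage monitor\n", i);
		else if (val == THERMAL_DIODE_MODE && i < 2)
			printf("channel %d: thermal diode\n", i);
		else if (val == THERMISTOR_MODE)
			printf("channel %d: thermistor\n", i);
		else	/* diode mode on channels 2..3 is reserved */
			printf("channel %d: reserved\n", i);
	}
	return 0;
}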
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index fa9d34af87ac..f72803a02391 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -626,6 +626,9 @@ static void intel_th_gth_switch(struct intel_th_device *thdev,
if (!count)
dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n");
+ /* De-assert the trigger */
+ iowrite32(0, gth->base + REG_CTS_CTL);
+
intel_th_gth_stop(gth, output, false);
intel_th_gth_start(gth, output);
}
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index fc9f15f36ad4..6d240dfae9d9 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -164,7 +164,7 @@ struct msc {
};
static LIST_HEAD(msu_buffer_list);
-static struct mutex msu_buffer_mutex;
+static DEFINE_MUTEX(msu_buffer_mutex);
/**
* struct msu_buffer_entry - internal MSU buffer bookkeeping
@@ -327,7 +327,7 @@ static size_t msc_win_total_sz(struct msc_window *win)
struct msc_block_desc *bdesc = sg_virt(sg);
if (msc_block_wrapped(bdesc))
- return win->nr_blocks << PAGE_SHIFT;
+ return (size_t)win->nr_blocks << PAGE_SHIFT;
size += msc_total_sz(bdesc);
if (msc_block_last_written(bdesc))
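The cast matters because win->nr_blocks is 32 bits wide and the shift was previously evaluated in 32-bit arithmetic. A quick userspace check of the overflow the cast avoids (assuming a 64-bit size_t and PAGE_SHIFT == 12):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nr_blocks = 1u << 20;	/* a 4 GiB window with 4 KiB pages */
	size_t bad  = nr_blocks << 12;		/* shift done in 32 bits: wraps to 0 */
	size_t good = (size_t)nr_blocks << 12;	/* 0x100000000 */

	printf("bad=%#zx good=%#zx\n", bad, good);
	return 0;
}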
@@ -1848,9 +1848,14 @@ mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
len = cp - buf;
mode = kstrndup(buf, len, GFP_KERNEL);
+ if (!mode)
+ return -ENOMEM;
+
i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
- if (i >= 0)
+ if (i >= 0) {
+ kfree(mode);
goto found;
+ }
/* Buffer sinks only work with a usable IRQ */
if (!msc->do_irq) {
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 91dfeba62485..03ca5b1bef9f 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -200,6 +200,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Comet Lake PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x06a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Ice Lake NNPI */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
.driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -209,6 +214,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Jasper Lake PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 663f8a5012d6..73aee5949b6b 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1399,7 +1399,7 @@ static int stm32_adc_dma_start(struct iio_dev *indio_dev)
cookie = dmaengine_submit(desc);
ret = dma_submit_error(cookie);
if (ret) {
- dmaengine_terminate_all(adc->dma_chan);
+ dmaengine_terminate_sync(adc->dma_chan);
return ret;
}
@@ -1477,7 +1477,7 @@ static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev)
stm32_adc_conv_irq_disable(adc);
if (adc->dma_chan)
- dmaengine_terminate_all(adc->dma_chan);
+ dmaengine_terminate_sync(adc->dma_chan);
if (stm32_adc_set_trig(indio_dev, NULL))
dev_err(&indio_dev->dev, "Can't clear trigger\n");
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index b99d73887c9f..8743b2f376e2 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -317,8 +317,11 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
struct adis16480 *st = iio_priv(indio_dev);
unsigned int t, reg;
+ if (val < 0 || val2 < 0)
+ return -EINVAL;
+
t = val * 1000 + val2 / 1000;
- if (t <= 0)
+ if (t == 0)
return -EINVAL;
/*
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index b17f060b52fc..868281b8adb0 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -114,54 +114,63 @@ static const struct inv_mpu6050_hw hw_info[] = {
.name = "MPU6050",
.reg = &reg_set_6050,
.config = &chip_config_6050,
+ .fifo_size = 1024,
},
{
.whoami = INV_MPU6500_WHOAMI_VALUE,
.name = "MPU6500",
.reg = &reg_set_6500,
.config = &chip_config_6050,
+ .fifo_size = 512,
},
{
.whoami = INV_MPU6515_WHOAMI_VALUE,
.name = "MPU6515",
.reg = &reg_set_6500,
.config = &chip_config_6050,
+ .fifo_size = 512,
},
{
.whoami = INV_MPU6000_WHOAMI_VALUE,
.name = "MPU6000",
.reg = &reg_set_6050,
.config = &chip_config_6050,
+ .fifo_size = 1024,
},
{
.whoami = INV_MPU9150_WHOAMI_VALUE,
.name = "MPU9150",
.reg = &reg_set_6050,
.config = &chip_config_6050,
+ .fifo_size = 1024,
},
{
.whoami = INV_MPU9250_WHOAMI_VALUE,
.name = "MPU9250",
.reg = &reg_set_6500,
.config = &chip_config_6050,
+ .fifo_size = 512,
},
{
.whoami = INV_MPU9255_WHOAMI_VALUE,
.name = "MPU9255",
.reg = &reg_set_6500,
.config = &chip_config_6050,
+ .fifo_size = 512,
},
{
.whoami = INV_ICM20608_WHOAMI_VALUE,
.name = "ICM20608",
.reg = &reg_set_6500,
.config = &chip_config_6050,
+ .fifo_size = 512,
},
{
.whoami = INV_ICM20602_WHOAMI_VALUE,
.name = "ICM20602",
.reg = &reg_set_icm20602,
.config = &chip_config_6050,
+ .fifo_size = 1008,
},
};
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index db1c6904388b..51235677c534 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -100,12 +100,14 @@ struct inv_mpu6050_chip_config {
* @name: name of the chip.
* @reg: register map of the chip.
* @config: configuration of the chip.
+ * @fifo_size: size of the FIFO in bytes.
*/
struct inv_mpu6050_hw {
u8 whoami;
u8 *name;
const struct inv_mpu6050_reg_map *reg;
const struct inv_mpu6050_chip_config *config;
+ size_t fifo_size;
};
/*
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 5f9a5de0bab4..72d8c5790076 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -180,9 +180,6 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
"failed to ack interrupt\n");
goto flush_fifo;
}
- /* handle fifo overflow by reseting fifo */
- if (int_status & INV_MPU6050_BIT_FIFO_OVERFLOW_INT)
- goto flush_fifo;
if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) {
dev_warn(regmap_get_device(st->map),
"spurious interrupt with status 0x%x\n", int_status);
@@ -211,6 +208,18 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
if (result)
goto end_session;
fifo_count = get_unaligned_be16(&data[0]);
+
+ /*
+ * Handle fifo overflow by resetting fifo.
+ * Reset if there are only 3 free data sets remaining, to mitigate the
+ * possible delay between reading the fifo count and the fifo data.
+ */
+ nb = 3 * bytes_per_datum;
+ if (fifo_count >= st->hw->fifo_size - nb) {
+ dev_warn(regmap_get_device(st->map), "fifo overflow reset\n");
+ goto flush_fifo;
+ }
+
/* compute and process all complete datum */
nb = fifo_count / bytes_per_datum;
inv_mpu6050_update_period(st, pf->timestamp, nb);
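To make the new threshold concrete, a short userspace sketch of the arithmetic (values invented for the demo: a 512-byte FIFO as on the MPU6500, 12 bytes per accel+gyro data set):

#include <stdio.h>

int main(void)
{
	unsigned int fifo_size = 512;		/* MPU6500 hardware FIFO */
	unsigned int bytes_per_datum = 12;	/* 3-axis accel + 3-axis gyro */
	unsigned int guard = 3 * bytes_per_datum;
	unsigned int fifo_count = 480;		/* as read from the count register */

	/* any count within 3 data sets of the top triggers a flush */
	if (fifo_count >= fifo_size - guard)
		printf("flush: %u >= %u\n", fifo_count, fifo_size - guard);
	return 0;
}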
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
index 8b50d56b0a03..01eb8cc63076 100644
--- a/drivers/iio/proximity/srf04.c
+++ b/drivers/iio/proximity/srf04.c
@@ -110,7 +110,7 @@ static int srf04_read(struct srf04_data *data)
udelay(data->cfg->trigger_pulse_us);
gpiod_set_value(data->gpiod_trig, 0);
- /* it cannot take more than 20 ms */
+ /* it should not take more than 20 ms until echo is rising */
ret = wait_for_completion_killable_timeout(&data->rising, HZ/50);
if (ret < 0) {
mutex_unlock(&data->lock);
@@ -120,7 +120,8 @@ static int srf04_read(struct srf04_data *data)
return -ETIMEDOUT;
}
- ret = wait_for_completion_killable_timeout(&data->falling, HZ/50);
+ /* it cannot take more than 50 ms until echo is falling */
+ ret = wait_for_completion_killable_timeout(&data->falling, HZ/20);
if (ret < 0) {
mutex_unlock(&data->lock);
return ret;
@@ -135,19 +136,19 @@ static int srf04_read(struct srf04_data *data)
dt_ns = ktime_to_ns(ktime_dt);
/*
- * measuring more than 3 meters is beyond the capabilities of
- * the sensor
+ * measuring more than 6.45 meters is beyond the capabilities of
+ * the supported sensors
* ==> filter out invalid results for not measuring echos of
* another us sensor
*
* formula:
- * distance 3 m
- * time = ---------- = --------- = 9404389 ns
- * speed 319 m/s
+ * distance 6.45 * 2 m
+ * time = ---------- = ------------ = 40438871 ns
+ * speed 319 m/s
*
* using a minimum speed at -20 °C of 319 m/s
*/
- if (dt_ns > 9404389)
+ if (dt_ns > 40438871)
return -EIO;
time_ns = dt_ns;
@@ -159,20 +160,20 @@ static int srf04_read(struct srf04_data *data)
* with Temp in °C
* and speed in m/s
*
- * use 343 m/s as ultrasonic speed at 20 °C here in absence of the
+ * use 343.5 m/s as ultrasonic speed at 20 °C here in the absence of the
* temperature
*
* therefore:
- * time 343
- * distance = ------ * -----
- * 10^6 2
+ * time 343.5 time * 106
+ * distance = ------ * ------- = ------------
+ * 10^6 2 617176
* with time in ns
* and distance in mm (one way)
*
- * because we limit to 3 meters the multiplication with 343 just
+ * because we limit to 6.45 meters, the multiplication with 106 just
* fits into 32 bit
*/
- distance_mm = time_ns * 343 / 2000000;
+ distance_mm = time_ns * 106 / 617176;
return distance_mm;
}
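As a sanity check on the new constants: the filter admits at most dt_ns = 40438871, and 40438871 * 106 = 4,286,520,326, just below 2^32 - 1 = 4,294,967,295, so the intermediate product still fits in 32 bits. The division then yields at most 4,286,520,326 / 617176, roughly 6945 mm, which is the 343.5 m/s conversion of the longest accepted echo.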
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 71cb9525c074..26b792bb1027 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1489,7 +1489,6 @@ static int __init hfi1_mod_init(void)
goto bail_dev;
}
- hfi1_compute_tid_rdma_flow_wt();
/*
* These must be called before the driver is registered with
* the PCI subsystem.
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 61aa5504d7c3..61362bd6d3ce 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -319,7 +319,9 @@ int pcie_speeds(struct hfi1_devdata *dd)
/*
* bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
*/
- if (parent && dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) {
+ if (parent &&
+ (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
+ dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) {
dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
dd->link_gen3_capable = 0;
}
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 513a8aac9ccd..1a3c647675a7 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2209,15 +2209,15 @@ int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
if (qp->s_flags & RVT_S_WAIT_RNR)
goto bail_stop;
rdi = ib_to_rvt(qp->ibqp.device);
- if (qp->s_rnr_retry == 0 &&
- !((rdi->post_parms[wqe->wr.opcode].flags &
- RVT_OPERATION_IGN_RNR_CNT) &&
- qp->s_rnr_retry_cnt == 0)) {
- status = IB_WC_RNR_RETRY_EXC_ERR;
- goto class_b;
+ if (!(rdi->post_parms[wqe->wr.opcode].flags &
+ RVT_OPERATION_IGN_RNR_CNT)) {
+ if (qp->s_rnr_retry == 0) {
+ status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto class_b;
+ }
+ if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
+ qp->s_rnr_retry--;
}
- if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
- qp->s_rnr_retry--;
/*
* The last valid PSN is the previous PSN. For TID RDMA WRITE
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index f21fca3617d5..e53f542b60af 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -107,8 +107,6 @@ static u32 mask_generation(u32 a)
* C - Capcode
*/
-static u32 tid_rdma_flow_wt;
-
static void tid_rdma_trigger_resume(struct work_struct *work);
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
@@ -136,6 +134,26 @@ static void update_r_next_psn_fecn(struct hfi1_packet *packet,
struct tid_rdma_flow *flow,
bool fecn);
+static void validate_r_tid_ack(struct hfi1_qp_priv *priv)
+{
+ if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
+ priv->r_tid_ack = priv->r_tid_tail;
+}
+
+static void tid_rdma_schedule_ack(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ priv->s_flags |= RVT_S_ACK_PENDING;
+ hfi1_schedule_tid_send(qp);
+}
+
+static void tid_rdma_trigger_ack(struct rvt_qp *qp)
+{
+ validate_r_tid_ack(qp->priv);
+ tid_rdma_schedule_ack(qp);
+}
+
static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
{
return
@@ -3005,10 +3023,7 @@ nak_psn:
qpriv->s_nak_state = IB_NAK_PSN_ERROR;
/* We are NAK'ing the next expected PSN */
qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
- qpriv->s_flags |= RVT_S_ACK_PENDING;
- if (qpriv->r_tid_ack == HFI1_QP_WQE_INVALID)
- qpriv->r_tid_ack = qpriv->r_tid_tail;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_trigger_ack(qp);
}
goto unlock;
}
@@ -3371,18 +3386,17 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
}
-void hfi1_compute_tid_rdma_flow_wt(void)
+static u32 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp)
{
/*
* Heuristic for computing the RNR timeout when waiting on the flow
* queue. Rather than a computationally expensive exact estimate of when
* a flow will be available, we assume that if a QP is at position N in
* the flow queue it has to wait approximately (N + 1) * (number of
- * segments between two sync points), assuming PMTU of 4K. The rationale
- * for this is that flows are released and recycled at each sync point.
+ * segments between two sync points). The rationale for this is that
+ * flows are released and recycled at each sync point.
*/
- tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) /
- TID_RDMA_MAX_SEGMENT_SIZE;
+ return (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT;
}
static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
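For the common 4 KiB PMTU the new per-position estimate reduces to the old global value: (MAX_TID_FLOW_PSN * 4096) >> 18 is exactly MAX_TID_FLOW_PSN * 4096 / 262144, the quantity the removed tid_rdma_flow_wt used to cache. QPs negotiated to a smaller PMTU now get a proportionally shorter RNR wait instead of one sized for 4 KiB segments.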
@@ -3505,7 +3519,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
if (ret) {
- to_seg = tid_rdma_flow_wt *
+ to_seg = hfi1_compute_tid_rdma_flow_wt(qp) *
position_in_queue(qpriv,
&rcd->flow_queue);
break;
@@ -3526,7 +3540,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
/*
* If overtaking req->acked_tail, send an RNR NAK. Because the
* QP is not queued in this case, and the issue can only be
- * caused due a delay in scheduling the second leg which we
+ * caused by a delay in scheduling the second leg which we
* cannot estimate, we use a rather arbitrary RNR timeout of
* (MAX_FLOWS / 2) segments
*/
@@ -3534,8 +3548,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
MAX_FLOWS)) {
ret = -EAGAIN;
to_seg = MAX_FLOWS >> 1;
- qpriv->s_flags |= RVT_S_ACK_PENDING;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_trigger_ack(qp);
break;
}
@@ -4335,8 +4348,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
req);
trace_hfi1_tid_write_rsp_rcv_data(qp);
- if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
- priv->r_tid_ack = priv->r_tid_tail;
+ validate_r_tid_ack(priv);
if (opcode == TID_OP(WRITE_DATA_LAST)) {
release_rdma_sge_mr(e);
@@ -4375,8 +4387,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
}
done:
- priv->s_flags |= RVT_S_ACK_PENDING;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_schedule_ack(qp);
exit:
priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
if (fecn)
@@ -4388,10 +4399,7 @@ send_nak:
if (!priv->s_nak_state) {
priv->s_nak_state = IB_NAK_PSN_ERROR;
priv->s_nak_psn = flow->flow_state.r_next_psn;
- priv->s_flags |= RVT_S_ACK_PENDING;
- if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
- priv->r_tid_ack = priv->r_tid_tail;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_trigger_ack(qp);
}
goto done;
}
@@ -4939,8 +4947,7 @@ void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
qpriv->resync = true;
/* RESYNC request always gets a TID RDMA ACK. */
qpriv->s_nak_state = 0;
- qpriv->s_flags |= RVT_S_ACK_PENDING;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_trigger_ack(qp);
bail:
if (fecn)
qp->s_flags |= RVT_S_ECN;
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.h b/drivers/infiniband/hw/hfi1/tid_rdma.h
index 1c536185261e..6e82df2190b7 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.h
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.h
@@ -17,6 +17,7 @@
#define TID_RDMA_MIN_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
#define TID_RDMA_MAX_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
#define TID_RDMA_MAX_PAGES (BIT(18) >> PAGE_SHIFT)
+#define TID_RDMA_SEGMENT_SHIFT 18
/*
* Bit definitions for priv->s_flags.
@@ -274,8 +275,6 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
struct ib_other_headers *ohdr,
u32 *bth1, u32 *bth2, u32 *len);
-void hfi1_compute_tid_rdma_flow_wt(void);
-
void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);
u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 86783276fb1f..3bb8f78fb7b0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -59,7 +59,7 @@ enum {
#define HNS_ROCE_HEM_CHUNK_LEN \
((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
- (sizeof(struct scatterlist)))
+ (sizeof(struct scatterlist) + sizeof(void *)))
#define check_whether_bt_num_3(type, hop_num) \
(type < HEM_TYPE_MTT && hop_num == 2)
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 9591457eb768..43ea2c13b212 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -376,7 +376,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
srq->max_gs = srq_init_attr->attr.max_sge;
- srq_desc_size = max(16, 16 * srq->max_gs);
+ srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));
srq->wqe_shift = ilog2(srq_desc_size);
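A worked example of why the rounding matters: with max_gs = 3 the descriptor size is 16 * 3 = 48 bytes, and ilog2(48) = 5, so wqe_shift previously described a 32-byte stride for 48-byte descriptors. Rounding up to 64 first makes 1 << wqe_shift match the real allocation unit.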
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 74ce9249e75a..5c3d052ac30b 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -35,7 +35,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
int vport_index;
if (rep->vport == MLX5_VPORT_UPLINK)
- profile = &uplink_rep_profile;
+ profile = &raw_eth_profile;
else
return mlx5_ib_set_vport_rep(dev, rep);
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index de43b423bafc..3b6750cba796 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -10,7 +10,7 @@
#include "mlx5_ib.h"
#ifdef CONFIG_MLX5_ESWITCH
-extern const struct mlx5_ib_profile uplink_rep_profile;
+extern const struct mlx5_ib_profile raw_eth_profile;
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 831539419c30..46ea4f0b9b51 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1031,7 +1031,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, cd))
props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
- if (!mlx5_core_is_pf(mdev))
+ if (mlx5_core_is_vf(mdev))
props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
if (mlx5_ib_port_link_layer(ibdev, 1) ==
@@ -5145,8 +5145,7 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
- if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
return 0;
}
@@ -5249,11 +5248,9 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
int err;
- if (MLX5_CAP_GEN(dev->mdev, roce)) {
- err = mlx5_nic_vport_enable_roce(dev->mdev);
- if (err)
- return err;
- }
+ err = mlx5_nic_vport_enable_roce(dev->mdev);
+ if (err)
+ return err;
err = mlx5_eth_lag_init(dev);
if (err)
@@ -5262,8 +5259,7 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
return 0;
err_disable_roce:
- if (MLX5_CAP_GEN(dev->mdev, roce))
- mlx5_nic_vport_disable_roce(dev->mdev);
+ mlx5_nic_vport_disable_roce(dev->mdev);
return err;
}
@@ -5271,8 +5267,7 @@ err_disable_roce:
static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
mlx5_eth_lag_cleanup(dev);
- if (MLX5_CAP_GEN(dev->mdev, roce))
- mlx5_nic_vport_disable_roce(dev->mdev);
+ mlx5_nic_vport_disable_roce(dev->mdev);
}
struct mlx5_ib_counter {
@@ -6444,7 +6439,7 @@ static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
.query_port = mlx5_ib_rep_query_port,
};
-static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
{
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
return 0;
@@ -6484,7 +6479,7 @@ static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
mlx5_remove_netdev_notifier(dev, port_num);
}
-static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_raw_eth_roce_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
@@ -6500,7 +6495,7 @@ static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
return err;
}
-static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_raw_eth_roce_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_stage_common_roce_cleanup(dev);
}
@@ -6807,7 +6802,7 @@ static const struct mlx5_ib_profile pf_profile = {
mlx5_ib_stage_delay_drop_cleanup),
};
-const struct mlx5_ib_profile uplink_rep_profile = {
+const struct mlx5_ib_profile raw_eth_profile = {
STAGE_CREATE(MLX5_IB_STAGE_INIT,
mlx5_ib_stage_init_init,
mlx5_ib_stage_init_cleanup),
@@ -6818,11 +6813,11 @@ const struct mlx5_ib_profile uplink_rep_profile = {
mlx5_ib_stage_caps_init,
NULL),
STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
- mlx5_ib_stage_rep_non_default_cb,
+ mlx5_ib_stage_raw_eth_non_default_cb,
NULL),
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
- mlx5_ib_stage_rep_roce_init,
- mlx5_ib_stage_rep_roce_cleanup),
+ mlx5_ib_stage_raw_eth_roce_init,
+ mlx5_ib_stage_raw_eth_roce_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_SRQ,
mlx5_init_srq_table,
mlx5_cleanup_srq_table),
@@ -6898,6 +6893,7 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
+ const struct mlx5_ib_profile *profile;
enum rdma_link_layer ll;
struct mlx5_ib_dev *dev;
int port_type_cap;
@@ -6933,7 +6929,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->mdev = mdev;
dev->num_ports = num_ports;
- return __mlx5_ib_add(dev, &pf_profile);
+ if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
+ profile = &raw_eth_profile;
+ else
+ profile = &pf_profile;
+
+ return __mlx5_ib_add(dev, profile);
}
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 1cb40c7475af..8229a9006917 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -489,6 +489,15 @@ static void ml_ff_destroy(struct ff_device *ff)
{
struct ml_device *ml = ff->private;
+ /*
+ * Even though we stop all playing effects when tearing down
+ * an input device (via input_device_flush() that calls into
+ * input_ff_flush() that stops and erases all effects), we
+ * do not actually stop the timer, and therefore we should
+ * do it here.
+ */
+ del_timer_sync(&ml->timer);
+
kfree(ml->private);
}
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 56fae3472114..704558d449a2 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -177,6 +177,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0096", /* X280 */
"LEN0097", /* X280 -> ALPS trackpoint */
"LEN009b", /* T580 */
+ "LEN0402", /* X1 Extreme 2nd Generation */
"LEN200f", /* T450s */
"LEN2054", /* E480 */
"LEN2055", /* E580 */
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index f28a7158b2ef..bbf9ae9f3f0c 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -510,7 +510,6 @@ struct f11_data {
struct rmi_2d_sensor_platform_data sensor_pdata;
unsigned long *abs_mask;
unsigned long *rel_mask;
- unsigned long *result_bits;
};
enum f11_finger_state {
@@ -1057,7 +1056,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
/*
** init instance data, fill in values and create any sysfs files
*/
- f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 3,
+ f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 2,
GFP_KERNEL);
if (!f11)
return -ENOMEM;
@@ -1076,8 +1075,6 @@ static int rmi_f11_initialize(struct rmi_function *fn)
+ sizeof(struct f11_data));
f11->rel_mask = (unsigned long *)((char *)f11
+ sizeof(struct f11_data) + mask_size);
- f11->result_bits = (unsigned long *)((char *)f11
- + sizeof(struct f11_data) + mask_size * 2);
set_bit(fn->irq_pos, f11->abs_mask);
set_bit(fn->irq_pos + 1, f11->rel_mask);
@@ -1284,8 +1281,8 @@ static irqreturn_t rmi_f11_attention(int irq, void *ctx)
valid_bytes = f11->sensor.attn_size;
memcpy(f11->sensor.data_pkt, drvdata->attn_data.data,
valid_bytes);
- drvdata->attn_data.data += f11->sensor.attn_size;
- drvdata->attn_data.size -= f11->sensor.attn_size;
+ drvdata->attn_data.data += valid_bytes;
+ drvdata->attn_data.size -= valid_bytes;
} else {
error = rmi_read_block(rmi_dev,
data_base_addr, f11->sensor.data_pkt,
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index d20a5d6780d1..7e97944f7616 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -55,6 +55,9 @@ struct f12_data {
const struct rmi_register_desc_item *data15;
u16 data15_offset;
+
+ unsigned long *abs_mask;
+ unsigned long *rel_mask;
};
static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
@@ -209,8 +212,8 @@ static irqreturn_t rmi_f12_attention(int irq, void *ctx)
valid_bytes = sensor->attn_size;
memcpy(sensor->data_pkt, drvdata->attn_data.data,
valid_bytes);
- drvdata->attn_data.data += sensor->attn_size;
- drvdata->attn_data.size -= sensor->attn_size;
+ drvdata->attn_data.data += valid_bytes;
+ drvdata->attn_data.size -= valid_bytes;
} else {
retval = rmi_read_block(rmi_dev, f12->data_addr,
sensor->data_pkt, sensor->pkt_size);
@@ -291,9 +294,18 @@ static int rmi_f12_write_control_regs(struct rmi_function *fn)
static int rmi_f12_config(struct rmi_function *fn)
{
struct rmi_driver *drv = fn->rmi_dev->driver;
+ struct f12_data *f12 = dev_get_drvdata(&fn->dev);
+ struct rmi_2d_sensor *sensor;
int ret;
- drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+ sensor = &f12->sensor;
+
+ if (!sensor->report_abs)
+ drv->clear_irq_bits(fn->rmi_dev, f12->abs_mask);
+ else
+ drv->set_irq_bits(fn->rmi_dev, f12->abs_mask);
+
+ drv->clear_irq_bits(fn->rmi_dev, f12->rel_mask);
ret = rmi_f12_write_control_regs(fn);
if (ret)
@@ -315,9 +327,12 @@ static int rmi_f12_probe(struct rmi_function *fn)
struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
u16 data_offset = 0;
+ int mask_size;
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__);
+ mask_size = BITS_TO_LONGS(drvdata->irq_count) * sizeof(unsigned long);
+
ret = rmi_read(fn->rmi_dev, query_addr, &buf);
if (ret < 0) {
dev_err(&fn->dev, "Failed to read general info register: %d\n",
@@ -332,10 +347,19 @@ static int rmi_f12_probe(struct rmi_function *fn)
return -ENODEV;
}
- f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data), GFP_KERNEL);
+ f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data) + mask_size * 2,
+ GFP_KERNEL);
if (!f12)
return -ENOMEM;
+ f12->abs_mask = (unsigned long *)((char *)f12
+ + sizeof(struct f12_data));
+ f12->rel_mask = (unsigned long *)((char *)f12
+ + sizeof(struct f12_data) + mask_size);
+
+ set_bit(fn->irq_pos, f12->abs_mask);
+ set_bit(fn->irq_pos + 1, f12->rel_mask);
+
f12->has_dribble = !!(buf & BIT(3));
if (fn->dev.of_node) {
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index 710b02595486..897105b9a98b 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -359,7 +359,7 @@ static const struct vb2_ops rmi_f54_queue_ops = {
static const struct vb2_queue rmi_f54_queue = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ,
- .buf_struct_size = sizeof(struct vb2_buffer),
+ .buf_struct_size = sizeof(struct vb2_v4l2_buffer),
.ops = &rmi_f54_queue_ops,
.mem_ops = &vb2_vmalloc_memops,
.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
@@ -601,7 +601,7 @@ static int rmi_f54_config(struct rmi_function *fn)
{
struct rmi_driver *drv = fn->rmi_dev->driver;
- drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+ drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
return 0;
}
@@ -730,6 +730,7 @@ static void rmi_f54_remove(struct rmi_function *fn)
video_unregister_device(&f54->vdev);
v4l2_device_unregister(&f54->v4l2);
+ destroy_workqueue(f54->workqueue);
}
struct rmi_function_handler rmi_f54_handler = {
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index 4b22d49a0f49..6bcffc930384 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -1990,11 +1990,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd)
/* get sysinfo */
md->si = &cd->sysinfo;
- if (!md->si) {
- dev_err(dev, "%s: Fail get sysinfo pointer from core p=%p\n",
- __func__, md->si);
- goto error_get_sysinfo;
- }
rc = cyttsp4_setup_input_device(cd);
if (rc)
@@ -2004,8 +1999,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd)
error_init_input:
input_free_device(md->input);
-error_get_sysinfo:
- input_set_drvdata(md->input, NULL);
error_alloc_failed:
dev_err(dev, "%s failed.\n", __func__);
return rc;
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 7b971228df38..c498796adc07 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -405,8 +405,12 @@ void icc_set_tag(struct icc_path *path, u32 tag)
if (!path)
return;
+ mutex_lock(&icc_lock);
+
for (i = 0; i < path->num_nodes; i++)
path->reqs[i].tag = tag;
+
+ mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_set_tag);
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index 910081d6ddc0..b4966d8f3348 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -433,7 +433,8 @@ static int qnoc_probe(struct platform_device *pdev)
if (!qp)
return -ENOMEM;
- data = devm_kcalloc(dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
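The replaced devm_kcalloc(dev, num_nodes, sizeof(*node), ...) sized the buffer for the node array alone and left no room for the data structure's header in front of it. A minimal userspace sketch of the struct_size() pattern (struct and helper names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct onecell_data {
	size_t num_nodes;
	void *nodes[];		/* flexible array member */
};

/* the kernel's struct_size(data, nodes, n) computes this, with overflow checks */
static size_t onecell_size(size_t n)
{
	return sizeof(struct onecell_data) + n * sizeof(void *);
}

int main(void)
{
	size_t n = 32;
	struct onecell_data *data = calloc(1, onecell_size(n));

	if (!data)
		return 1;
	data->num_nodes = n;
	printf("allocated %zu bytes for header plus %zu nodes\n",
	       onecell_size(n), n);
	free(data);
	return 0;
}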
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index 57955596bb59..502a6c22b41e 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -790,7 +790,8 @@ static int qnoc_probe(struct platform_device *pdev)
if (!qp)
return -ENOMEM;
- data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
index 304f50c08da2..078eeadf707a 100644
--- a/drivers/isdn/hardware/mISDN/Kconfig
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -10,7 +10,7 @@ config MISDN_HFCPCI
depends on PCI
help
Enable support for cards with Cologne Chip AG's
- HFC PCI chip.
+ HFC PCI chip.
config MISDN_HFCMULTI
tristate "Support for HFC multiport cards (HFC-4S/8S/E1)"
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 819e35995d32..cbb706dabede 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -28,6 +28,10 @@ MODULE_PARM_DESC(disable_guest,
static bool vmci_guest_personality_initialized;
static bool vmci_host_personality_initialized;
+static DEFINE_MUTEX(vmci_vsock_mutex); /* protects vmci_vsock_transport_cb */
+static vmci_vsock_cb vmci_vsock_transport_cb;
+static bool vmci_vsock_cb_host_called;
+
/*
* vmci_get_context_id() - Gets the current context ID.
*
@@ -45,6 +49,69 @@ u32 vmci_get_context_id(void)
}
EXPORT_SYMBOL_GPL(vmci_get_context_id);
+/*
+ * vmci_register_vsock_callback() - Register the VSOCK vmci_transport callback.
+ *
+ * The callback will be called when the first host or guest becomes active,
+ * or if they are already active when this function is called.
+ * To unregister the callback, call this function with a NULL parameter.
+ *
+ * Returns 0 on success. -EBUSY if a callback is already registered.
+ */
+int vmci_register_vsock_callback(vmci_vsock_cb callback)
+{
+ int err = 0;
+
+ mutex_lock(&vmci_vsock_mutex);
+
+ if (vmci_vsock_transport_cb && callback) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ vmci_vsock_transport_cb = callback;
+
+ if (!vmci_vsock_transport_cb) {
+ vmci_vsock_cb_host_called = false;
+ goto out;
+ }
+
+ if (vmci_guest_code_active())
+ vmci_vsock_transport_cb(false);
+
+ if (vmci_host_users() > 0) {
+ vmci_vsock_cb_host_called = true;
+ vmci_vsock_transport_cb(true);
+ }
+
+out:
+ mutex_unlock(&vmci_vsock_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vmci_register_vsock_callback);
+
+void vmci_call_vsock_callback(bool is_host)
+{
+ mutex_lock(&vmci_vsock_mutex);
+
+ if (!vmci_vsock_transport_cb)
+ goto out;
+
+ /* In the host, this function could be called multiple times,
+ * but we want to invoke the callback only once.
+ */
+ if (is_host) {
+ if (vmci_vsock_cb_host_called)
+ goto out;
+
+ vmci_vsock_cb_host_called = true;
+ }
+
+ vmci_vsock_transport_cb(is_host);
+out:
+ mutex_unlock(&vmci_vsock_mutex);
+}
+
static int __init vmci_drv_init(void)
{
int vmci_err;
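A minimal sketch of how a transport is expected to use the new hook (illustrative only; it assumes vmci_vsock_cb is a void callback taking the is_host flag, as the call sites above suggest):

/* hypothetical transport code, for illustration */
static void my_vsock_cb(bool is_host)
{
	/* runs once the guest personality probes (is_host == false),
	 * or on the first host-side context init (is_host == true)
	 */
}

static int my_transport_register(void)
{
	/* fails with -EBUSY if another callback is already registered */
	return vmci_register_vsock_callback(my_vsock_cb);
}

static void my_transport_unregister(void)
{
	vmci_register_vsock_callback(NULL);
}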
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
index aab81b67670c..990682480bf6 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.h
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -36,10 +36,12 @@ extern struct pci_dev *vmci_pdev;
u32 vmci_get_context_id(void);
int vmci_send_datagram(struct vmci_datagram *dg);
+void vmci_call_vsock_callback(bool is_host);
int vmci_host_init(void);
void vmci_host_exit(void);
bool vmci_host_code_active(void);
+int vmci_host_users(void);
int vmci_guest_init(void);
void vmci_guest_exit(void);
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 7a84a48c75da..cc8eeb361fcd 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -637,6 +637,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
vmci_dev->iobase + VMCI_CONTROL_ADDR);
pci_set_drvdata(pdev, vmci_dev);
+
+ vmci_call_vsock_callback(false);
return 0;
err_free_irq:
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 833e2bd248a5..ff3c396146ff 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -108,6 +108,11 @@ bool vmci_host_code_active(void)
atomic_read(&vmci_host_active_users) > 0);
}
+int vmci_host_users(void)
+{
+ return atomic_read(&vmci_host_active_users);
+}
+
/*
* Called on open of /dev/vmci.
*/
@@ -338,6 +343,8 @@ static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
atomic_inc(&vmci_host_active_users);
+ vmci_call_vsock_callback(true);
+
retval = 0;
out:
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index e7d1920729fb..0ae986c42bc8 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -358,7 +358,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
/* HS200 is broken at this moment */
- host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
ret = sdhci_add_host(host);
if (ret)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0059e6ba6a83..fcb7c2f7f001 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -41,6 +41,8 @@
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
@@ -2128,8 +2130,7 @@ static int bond_miimon_inspect(struct bonding *bond)
ignore_updelay = !rcu_dereference(bond->curr_active_slave);
bond_for_each_slave_rcu(bond, slave, iter) {
- slave->new_link = BOND_LINK_NOCHANGE;
- slave->link_new_state = slave->link;
+ bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2163,7 +2164,7 @@ static int bond_miimon_inspect(struct bonding *bond)
}
if (slave->delay <= 0) {
- slave->new_link = BOND_LINK_DOWN;
+ bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
continue;
}
@@ -2200,7 +2201,7 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->delay = 0;
if (slave->delay <= 0) {
- slave->new_link = BOND_LINK_UP;
+ bond_propose_link_state(slave, BOND_LINK_UP);
commit++;
ignore_updelay = false;
continue;
@@ -2238,7 +2239,7 @@ static void bond_miimon_commit(struct bonding *bond)
struct slave *slave, *primary;
bond_for_each_slave(bond, slave, iter) {
- switch (slave->new_link) {
+ switch (slave->link_new_state) {
case BOND_LINK_NOCHANGE:
/* For 802.3ad mode, check current slave speed and
* duplex again in case its port was disabled after
@@ -2310,8 +2311,8 @@ static void bond_miimon_commit(struct bonding *bond)
default:
slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
- slave->new_link);
- slave->new_link = BOND_LINK_NOCHANGE;
+ slave->link_new_state);
+ bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
continue;
}
@@ -2719,13 +2720,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
bond_for_each_slave_rcu(bond, slave, iter) {
unsigned long trans_start = dev_trans_start(slave->dev);
- slave->new_link = BOND_LINK_NOCHANGE;
+ bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, trans_start, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {
- slave->new_link = BOND_LINK_UP;
+ bond_propose_link_state(slave, BOND_LINK_UP);
slave_state_changed = 1;
/* primary_slave has no meaning in round-robin
@@ -2750,7 +2751,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
if (!bond_time_in_interval(bond, trans_start, 2) ||
!bond_time_in_interval(bond, slave->last_rx, 2)) {
- slave->new_link = BOND_LINK_DOWN;
+ bond_propose_link_state(slave, BOND_LINK_DOWN);
slave_state_changed = 1;
if (slave->link_failure_count < UINT_MAX)
@@ -2781,8 +2782,8 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
goto re_arm;
bond_for_each_slave(bond, slave, iter) {
- if (slave->new_link != BOND_LINK_NOCHANGE)
- slave->link = slave->new_link;
+ if (slave->link_new_state != BOND_LINK_NOCHANGE)
+ slave->link = slave->link_new_state;
}
if (slave_state_changed) {
@@ -2805,9 +2806,9 @@ re_arm:
}
/* Called to inspect slaves for active-backup mode ARP monitor link state
- * changes. Sets new_link in slaves to specify what action should take
- * place for the slave. Returns 0 if no changes are found, >0 if changes
- * to link states must be committed.
+ * changes. Sets proposed link state in slaves to specify what action
+ * should take place for the slave. Returns 0 if no changes are found, >0
+ * if changes to link states must be committed.
*
* Called with rcu_read_lock held.
*/
@@ -2819,12 +2820,12 @@ static int bond_ab_arp_inspect(struct bonding *bond)
int commit = 0;
bond_for_each_slave_rcu(bond, slave, iter) {
- slave->new_link = BOND_LINK_NOCHANGE;
+ bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
last_rx = slave_last_rx(bond, slave);
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, last_rx, 1)) {
- slave->new_link = BOND_LINK_UP;
+ bond_propose_link_state(slave, BOND_LINK_UP);
commit++;
}
continue;
@@ -2852,7 +2853,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
if (!bond_is_active_slave(slave) &&
!rcu_access_pointer(bond->current_arp_slave) &&
!bond_time_in_interval(bond, last_rx, 3)) {
- slave->new_link = BOND_LINK_DOWN;
+ bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
}
@@ -2865,7 +2866,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
if (bond_is_active_slave(slave) &&
(!bond_time_in_interval(bond, trans_start, 2) ||
!bond_time_in_interval(bond, last_rx, 2))) {
- slave->new_link = BOND_LINK_DOWN;
+ bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
}
}
@@ -2885,7 +2886,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
struct slave *slave;
bond_for_each_slave(bond, slave, iter) {
- switch (slave->new_link) {
+ switch (slave->link_new_state) {
case BOND_LINK_NOCHANGE:
continue;
@@ -2935,8 +2936,9 @@ static void bond_ab_arp_commit(struct bonding *bond)
continue;
default:
- slave_err(bond->dev, slave->dev, "impossible: new_link %d on slave\n",
- slave->new_link);
+ slave_err(bond->dev, slave->dev,
+ "impossible: link_new_state %d on slave\n",
+ slave->link_new_state);
continue;
}
@@ -3297,12 +3299,42 @@ static inline u32 bond_eth_hash(struct sk_buff *skb)
return 0;
}
+static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk,
+ int *noff, int *proto, bool l34)
+{
+ const struct ipv6hdr *iph6;
+ const struct iphdr *iph;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph))))
+ return false;
+ iph = (const struct iphdr *)(skb->data + *noff);
+ iph_to_flow_copy_v4addrs(fk, iph);
+ *noff += iph->ihl << 2;
+ if (!ip_is_fragment(iph))
+ *proto = iph->protocol;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph6))))
+ return false;
+ iph6 = (const struct ipv6hdr *)(skb->data + *noff);
+ iph_to_flow_copy_v6addrs(fk, iph6);
+ *noff += sizeof(*iph6);
+ *proto = iph6->nexthdr;
+ } else {
+ return false;
+ }
+
+ if (l34 && *proto >= 0)
+ fk->ports.ports = skb_flow_get_ports(skb, *noff, *proto);
+
+ return true;
+}
+
/* Extract the appropriate headers based on bond's xmit policy */
static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
struct flow_keys *fk)
{
- const struct ipv6hdr *iph6;
- const struct iphdr *iph;
+ bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
int noff, proto = -1;
if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23) {
@@ -3314,31 +3346,30 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
fk->ports.ports = 0;
memset(&fk->icmp, 0, sizeof(fk->icmp));
noff = skb_network_offset(skb);
- if (skb->protocol == htons(ETH_P_IP)) {
- if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
- return false;
- iph = ip_hdr(skb);
- iph_to_flow_copy_v4addrs(fk, iph);
- noff += iph->ihl << 2;
- if (!ip_is_fragment(iph))
- proto = iph->protocol;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
- if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
- return false;
- iph6 = ipv6_hdr(skb);
- iph_to_flow_copy_v6addrs(fk, iph6);
- noff += sizeof(*iph6);
- proto = iph6->nexthdr;
- } else {
+ if (!bond_flow_ip(skb, fk, &noff, &proto, l34))
return false;
- }
- if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0) {
- if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6)
- skb_flow_get_icmp_tci(skb, &fk->icmp, skb->data,
- skb_transport_offset(skb),
- skb_headlen(skb));
- else
- fk->ports.ports = skb_flow_get_ports(skb, noff, proto);
+
+ /* ICMP error packets contain at least 8 bytes of the header
+ * of the packet that generated the error. Use this information
+ * to correlate ICMP error packets with the flow that
+ * generated the error.
+ */
+ if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
+ skb_flow_get_icmp_tci(skb, &fk->icmp, skb->data,
+ skb_transport_offset(skb),
+ skb_headlen(skb));
+ if (proto == IPPROTO_ICMP) {
+ if (!icmp_is_err(fk->icmp.type))
+ return true;
+
+ noff += sizeof(struct icmphdr);
+ } else if (proto == IPPROTO_ICMPV6) {
+ if (!icmpv6_is_err(fk->icmp.type))
+ return true;
+
+ noff += sizeof(struct icmp6hdr);
+ }
+ return bond_flow_ip(skb, fk, &noff, &proto, l34);
}
return true;
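Concretely, for an ICMPv4 destination-unreachable in layer3+4 mode: the first bond_flow_ip() call parses the outer IPv4 header and reports proto == IPPROTO_ICMP; icmp_is_err() is true for that type, so noff advances past the 8-byte struct icmphdr onto the embedded copy of the offending packet's IP header, and the second bond_flow_ip() call extracts that inner flow's addresses and ports for hashing. Echo requests and replies are not errors, so they return early with fk->icmp populated and hash on their own header, letting separate ping sessions still spread across slaves.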
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 606b7d8ffe13..8e9f5620c9a2 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -52,6 +52,7 @@
#define CONTROL_EX_PDR BIT(8)
/* control register */
+#define CONTROL_SWR BIT(15)
#define CONTROL_TEST BIT(7)
#define CONTROL_CCE BIT(6)
#define CONTROL_DISABLE_AR BIT(5)
@@ -97,6 +98,9 @@
#define BTR_TSEG2_SHIFT 12
#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
+/* interrupt register */
+#define INT_STS_PENDING 0x8000
+
/* brp extension register */
#define BRP_EXT_BRPE_MASK 0x0f
#define BRP_EXT_BRPE_SHIFT 0
@@ -569,6 +573,26 @@ static void c_can_configure_msg_objects(struct net_device *dev)
IF_MCONT_RCV_EOB);
}
+static int c_can_software_reset(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+ int retry = 0;
+
+ if (priv->type != BOSCH_D_CAN)
+ return 0;
+
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT);
+ while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) {
+ msleep(20);
+ if (retry++ > 100) {
+ netdev_err(dev, "CCTRL: software reset failed\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
/*
* Configure C_CAN chip:
* - enable/disable auto-retransmission
@@ -578,6 +602,11 @@ static void c_can_configure_msg_objects(struct net_device *dev)
static int c_can_chip_config(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = c_can_software_reset(dev);
+ if (err)
+ return err;
/* enable automatic retransmission */
priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
@@ -886,6 +915,9 @@ static int c_can_handle_state_change(struct net_device *dev,
struct can_berr_counter bec;
switch (error_type) {
+ case C_CAN_NO_ERROR:
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ break;
case C_CAN_ERROR_WARNING:
/* error warning state */
priv->can.can_stats.error_warning++;
@@ -916,6 +948,13 @@ static int c_can_handle_state_change(struct net_device *dev,
ERR_CNT_RP_SHIFT;
switch (error_type) {
+ case C_CAN_NO_ERROR:
+ /* back to error active state */
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ break;
case C_CAN_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL;
@@ -1029,10 +1068,16 @@ static int c_can_poll(struct napi_struct *napi, int quota)
u16 curr, last = priv->last_status;
int work_done = 0;
- priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
- /* Ack status on C_CAN. D_CAN is self clearing */
- if (priv->type != BOSCH_D_CAN)
- priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
+ /* Only read the status register if a status interrupt was pending */
+ if (atomic_xchg(&priv->sie_pending, 0)) {
+ priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
+ /* Ack status on C_CAN. D_CAN is self clearing */
+ if (priv->type != BOSCH_D_CAN)
+ priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
+ } else {
+ /* no change detected ... */
+ curr = last;
+ }
/* handle state changes */
if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
@@ -1054,11 +1099,17 @@ static int c_can_poll(struct napi_struct *napi, int quota)
/* handle bus recovery events */
if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
netdev_dbg(dev, "left bus off state\n");
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
}
+
if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
netdev_dbg(dev, "left error passive state\n");
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
+ }
+
+ if ((!(curr & STATUS_EWARN)) && (last & STATUS_EWARN)) {
+ netdev_dbg(dev, "left error warning state\n");
+ work_done += c_can_handle_state_change(dev, C_CAN_NO_ERROR);
}
/* handle lec errors on the bus */
@@ -1083,10 +1134,16 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct c_can_priv *priv = netdev_priv(dev);
+ int reg_int;
- if (!priv->read_reg(priv, C_CAN_INT_REG))
+ reg_int = priv->read_reg(priv, C_CAN_INT_REG);
+ if (!reg_int)
return IRQ_NONE;
+ /* save for later use */
+ if (reg_int & INT_STS_PENDING)
+ atomic_set(&priv->sie_pending, 1);
+
/* disable all interrupts and schedule the NAPI */
c_can_irq_control(priv, false);
napi_schedule(&priv->napi);
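The sie_pending flag is a one-producer/one-consumer handshake: the ISR records that a status interrupt fired, and the NAPI poll consumes the flag exactly once with an atomic exchange. A minimal userspace sketch of the pattern with C11 atomics (register value invented for the demo):

#include <stdatomic.h>
#include <stdio.h>

#define INT_STS_PENDING 0x8000

static atomic_int sie_pending;

static void isr(int reg_int)		/* interrupt side: producer */
{
	if (reg_int & INT_STS_PENDING)
		atomic_store(&sie_pending, 1);
}

static void napi_poll(void)		/* poll side: consumer */
{
	if (atomic_exchange(&sie_pending, 0))
		printf("read and ack the status register\n");
	else
		printf("no status change, keep last_status\n");
}

int main(void)
{
	isr(0x8001);
	napi_poll();	/* consumes the flag, reads the register */
	napi_poll();	/* flag already cleared, skips the read */
	return 0;
}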
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 8acdc7fa4792..d5567a7c1c6d 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -198,6 +198,7 @@ struct c_can_priv {
struct net_device *dev;
struct device *device;
atomic_t tx_active;
+ atomic_t sie_pending;
unsigned long tx_dir;
int last_status;
u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index b5145a7f874c..05f425ceb53a 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -39,10 +39,11 @@
#include "c_can.h"
-#define DCAN_RAM_INIT_BIT (1 << 3)
+#define DCAN_RAM_INIT_BIT BIT(3)
+
static DEFINE_SPINLOCK(raminit_lock);
-/*
- * 16-bit c_can registers can be arranged differently in the memory
+
+/* 16-bit c_can registers can be arranged differently in the memory
* architecture of different implementations. For example: 16-bit
* registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
* Handle the same by providing a common read/write interface.
@@ -54,7 +55,7 @@ static u16 c_can_plat_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
}
static void c_can_plat_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
- enum reg index, u16 val)
+ enum reg index, u16 val)
{
writew(val, priv->base + priv->regs[index]);
}
@@ -66,7 +67,7 @@ static u16 c_can_plat_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
}
static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
- enum reg index, u16 val)
+ enum reg index, u16 val)
{
writew(val, priv->base + 2 * priv->regs[index]);
}
@@ -144,13 +145,13 @@ static u32 c_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
u32 val;
val = priv->read_reg(priv, index);
- val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
+ val |= ((u32)priv->read_reg(priv, index + 1)) << 16;
return val;
}
-static void c_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
- u32 val)
+static void c_can_plat_write_reg32(const struct c_can_priv *priv,
+ enum reg index, u32 val)
{
priv->write_reg(priv, index + 1, val >> 16);
priv->write_reg(priv, index, val);
@@ -161,8 +162,8 @@ static u32 d_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
return readl(priv->base + priv->regs[index]);
}
-static void d_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
- u32 val)
+static void d_can_plat_write_reg32(const struct c_can_priv *priv,
+ enum reg index, u32 val)
{
writel(val, priv->base + priv->regs[index]);
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index ac86be52b461..6ee06a49fb4c 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -553,10 +553,9 @@ static void can_restart(struct net_device *dev)
/* send restart message upstream */
skb = alloc_can_err_skb(dev, &cf);
- if (!skb) {
- err = -ENOMEM;
+ if (!skb)
goto restart;
- }
+
cf->can_id |= CAN_ERR_RESTARTED;
netif_rx(skb);
@@ -848,6 +847,7 @@ void of_can_transceiver(struct net_device *dev)
return;
ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
+ of_node_put(dn);
if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
}
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index dc5695dffc2e..a929cdda9ab2 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -142,7 +142,7 @@
#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8
#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0
#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
-#define FLEXCAN_IFLAG_MB(x) BIT((x) & 0x1f)
+#define FLEXCAN_IFLAG_MB(x) BIT_ULL(x)
#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5)
@@ -277,9 +277,9 @@ struct flexcan_priv {
u8 mb_size;
u8 clk_src; /* clock source of CAN Protocol Engine */
+ u64 rx_mask;
+ u64 tx_mask;
u32 reg_ctrl_default;
- u32 reg_imask1_default;
- u32 reg_imask2_default;
struct clk *clk_ipg;
struct clk *clk_per;
@@ -677,6 +677,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
struct can_frame *cf;
bool rx_errors = false, tx_errors = false;
u32 timestamp;
+ int err;
timestamp = priv->read(&regs->timer) << 16;
@@ -725,7 +726,9 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
if (tx_errors)
dev->stats.tx_errors++;
- can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ if (err)
+ dev->stats.rx_fifo_errors++;
}
static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
@@ -738,8 +741,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
int flt;
struct can_berr_counter bec;
u32 timestamp;
-
- timestamp = priv->read(&regs->timer) << 16;
+ int err;
flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
@@ -760,6 +762,8 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
if (likely(new_state == priv->can.state))
return;
+ timestamp = priv->read(&regs->timer) << 16;
+
skb = alloc_can_err_skb(dev, &cf);
if (unlikely(!skb))
return;
@@ -769,7 +773,39 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
if (unlikely(new_state == CAN_STATE_BUS_OFF))
can_bus_off(dev);
- can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ if (err)
+ dev->stats.rx_fifo_errors++;
+}
+
+static inline u64 flexcan_read64_mask(struct flexcan_priv *priv, void __iomem *addr, u64 mask)
+{
+ u64 reg = 0;
+
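+ /* In the FlexCAN register map, iflag2/imask2 sit immediately
+ * below iflag1/imask1, so the upper 32 bits of the combined
+ * value are read from (and written to) addr - 4.
+ */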
+ if (upper_32_bits(mask))
+ reg = (u64)priv->read(addr - 4) << 32;
+ if (lower_32_bits(mask))
+ reg |= priv->read(addr);
+
+ return reg & mask;
+}
+
+static inline void flexcan_write64(struct flexcan_priv *priv, u64 val, void __iomem *addr)
+{
+ if (upper_32_bits(val))
+ priv->write(upper_32_bits(val), addr - 4);
+ if (lower_32_bits(val))
+ priv->write(lower_32_bits(val), addr);
+}
+
+static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
+{
+ return flexcan_read64_mask(priv, &priv->regs->iflag1, priv->rx_mask);
+}
+
+static inline u64 flexcan_read_reg_iflag_tx(struct flexcan_priv *priv)
+{
+ return flexcan_read64_mask(priv, &priv->regs->iflag1, priv->tx_mask);
}
static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
@@ -777,16 +813,23 @@ static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *off
return container_of(offload, struct flexcan_priv, offload);
}
-static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
- struct can_frame *cf,
- u32 *timestamp, unsigned int n)
+static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
+ unsigned int n, u32 *timestamp,
+ bool drop)
{
struct flexcan_priv *priv = rx_offload_to_priv(offload);
struct flexcan_regs __iomem *regs = priv->regs;
struct flexcan_mb __iomem *mb;
+ struct sk_buff *skb;
+ struct can_frame *cf;
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
mb = flexcan_get_mb(priv, n);
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
@@ -800,7 +843,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
code = reg_ctrl & FLEXCAN_MB_CODE_MASK;
if ((code != FLEXCAN_MB_CODE_RX_FULL) &&
(code != FLEXCAN_MB_CODE_RX_OVERRUN))
- return 0;
+ return NULL;
if (code == FLEXCAN_MB_CODE_RX_OVERRUN) {
/* This MB was overrun, we lost data */
@@ -810,11 +853,17 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
} else {
reg_iflag1 = priv->read(&regs->iflag1);
if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
- return 0;
+ return NULL;
reg_ctrl = priv->read(&mb->can_ctrl);
}
+ skb = alloc_can_skb(offload->dev, &cf);
+ if (!skb) {
+ skb = ERR_PTR(-ENOMEM);
+ goto mark_as_read;
+ }
+
/* increase timestamp to full 32 bit */
*timestamp = reg_ctrl << 16;
@@ -833,16 +882,11 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
*(__be32 *)(cf->data + i) = data;
}
- /* mark as read */
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- /* Clear IRQ */
- if (n < 32)
- priv->write(BIT(n), &regs->iflag1);
- else
- priv->write(BIT(n - 32), &regs->iflag2);
- } else {
+ mark_as_read:
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+ flexcan_write64(priv, FLEXCAN_IFLAG_MB(n), &regs->iflag1);
+ else
priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
- }
/* Read the Free Running Timer. It is optional but recommended
* to unlock Mailbox as soon as possible and make it available
@@ -850,20 +894,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
*/
priv->read(&regs->timer);
- return 1;
-}
-
-
-static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
-{
- struct flexcan_regs __iomem *regs = priv->regs;
- u32 iflag1, iflag2;
-
- iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default &
- ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
- iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default;
-
- return (u64)iflag2 << 32 | iflag1;
+ return skb;
}
static irqreturn_t flexcan_irq(int irq, void *dev_id)
@@ -873,18 +904,19 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
irqreturn_t handled = IRQ_NONE;
- u32 reg_iflag2, reg_esr;
+ u64 reg_iflag_tx;
+ u32 reg_esr;
enum can_state last_state = priv->can.state;
/* reception interrupt */
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- u64 reg_iflag;
+ u64 reg_iflag_rx;
int ret;
- while ((reg_iflag = flexcan_read_reg_iflag_rx(priv))) {
+ while ((reg_iflag_rx = flexcan_read_reg_iflag_rx(priv))) {
handled = IRQ_HANDLED;
ret = can_rx_offload_irq_offload_timestamp(&priv->offload,
- reg_iflag);
+ reg_iflag_rx);
if (!ret)
break;
}
@@ -907,10 +939,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
}
}
- reg_iflag2 = priv->read(&regs->iflag2);
+ reg_iflag_tx = flexcan_read_reg_iflag_tx(priv);
/* transmission complete interrupt */
- if (reg_iflag2 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
+ if (reg_iflag_tx & priv->tx_mask) {
u32 reg_ctrl = priv->read(&priv->tx_mb->can_ctrl);
handled = IRQ_HANDLED;
@@ -922,7 +954,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
/* after sending a RTR frame MB is in RX mode */
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
&priv->tx_mb->can_ctrl);
- priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag2);
+ flexcan_write64(priv, priv->tx_mask, &regs->iflag1);
netif_wake_queue(dev);
}
@@ -1034,6 +1066,7 @@ static int flexcan_chip_start(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr;
+ u64 reg_imask;
int err, i;
struct flexcan_mb __iomem *mb;
@@ -1188,6 +1221,7 @@ static int flexcan_chip_start(struct net_device *dev)
reg_mecr = priv->read(&regs->mecr);
reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
priv->write(reg_mecr, &regs->mecr);
+ reg_mecr |= FLEXCAN_MECR_ECCDIS;
reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
FLEXCAN_MECR_FANCEI_MSK);
priv->write(reg_mecr, &regs->mecr);
@@ -1207,8 +1241,9 @@ static int flexcan_chip_start(struct net_device *dev)
/* enable interrupts atomically */
disable_irq(dev->irq);
priv->write(priv->reg_ctrl_default, &regs->ctrl);
- priv->write(priv->reg_imask1_default, &regs->imask1);
- priv->write(priv->reg_imask2_default, &regs->imask2);
+ reg_imask = priv->rx_mask | priv->tx_mask;
+ priv->write(upper_32_bits(reg_imask), &regs->imask2);
+ priv->write(lower_32_bits(reg_imask), &regs->imask1);
enable_irq(dev->irq);
/* print chip status */
@@ -1276,26 +1311,19 @@ static int flexcan_open(struct net_device *dev)
flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO);
priv->tx_mb_idx = priv->mb_count - 1;
priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx);
-
- priv->reg_imask1_default = 0;
- priv->reg_imask2_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+ priv->tx_mask = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
priv->offload.mailbox_read = flexcan_mailbox_read;
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- u64 imask;
-
priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
priv->offload.mb_last = priv->mb_count - 2;
- imask = GENMASK_ULL(priv->offload.mb_last,
- priv->offload.mb_first);
- priv->reg_imask1_default |= imask;
- priv->reg_imask2_default |= imask >> 32;
-
+ priv->rx_mask = GENMASK_ULL(priv->offload.mb_last,
+ priv->offload.mb_first);
err = can_rx_offload_add_timestamp(dev, &priv->offload);
} else {
- priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
+ priv->rx_mask = FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
FLEXCAN_IFLAG_RX_FIFO_AVAILABLE;
err = can_rx_offload_add_fifo(dev, &priv->offload,
FLEXCAN_NAPI_WEIGHT);
@@ -1527,7 +1555,6 @@ static int flexcan_probe(struct platform_device *pdev)
struct net_device *dev;
struct flexcan_priv *priv;
struct regulator *reg_xceiver;
- struct resource *mem;
struct clk *clk_ipg = NULL, *clk_per = NULL;
struct flexcan_regs __iomem *regs;
int err, irq;
@@ -1562,12 +1589,11 @@ static int flexcan_probe(struct platform_device *pdev)
clock_freq = clk_get_rate(clk_per);
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (irq <= 0)
return -ENODEV;
- regs = devm_ioremap_resource(&pdev->dev, mem);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
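flexcan is the first of several drivers in this series (grcan, ifi_canfd, rcar_can, rcar_canfd, sun4i_can, xilinx_can follow) converted to devm_platform_ioremap_resource(), which folds the platform_get_resource()/devm_ioremap_resource() pair into one call. A minimal probe-side sketch:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *regs;

	/* Equivalent to platform_get_resource(pdev, IORESOURCE_MEM, 0)
	 * followed by devm_ioremap_resource(&pdev->dev, res).
	 */
	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	return 0;
}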
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index b8f1f2b69dd3..378200b682fa 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1652,7 +1652,6 @@ exit_free_candev:
static int grcan_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
- struct resource *res;
u32 sysid, ambafreq;
int irq, err;
void __iomem *base;
@@ -1672,8 +1671,7 @@ static int grcan_probe(struct platform_device *ofdev)
goto exit_error;
}
- res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&ofdev->dev, res);
+ base = devm_platform_ioremap_resource(ofdev, 0);
if (IS_ERR(base)) {
err = PTR_ERR(base);
goto exit_error;
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index fedd927ba6ed..04d59bede5ea 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -942,13 +942,11 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct net_device *ndev;
struct ifi_canfd_priv *priv;
- struct resource *res;
void __iomem *addr;
int irq, ret;
u32 id, rev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 0);
irq = platform_get_irq(pdev, 0);
if (IS_ERR(addr) || irq < 0)
return -EINVAL;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 562c8317e3aa..02c5795b7393 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -123,6 +123,7 @@ enum m_can_reg {
#define CCCR_CME_CANFD_BRS 0x2
#define CCCR_TXP BIT(14)
#define CCCR_TEST BIT(7)
+#define CCCR_DAR BIT(6)
#define CCCR_MON BIT(5)
#define CCCR_CSR BIT(4)
#define CCCR_CSA BIT(3)
@@ -777,6 +778,43 @@ static inline bool is_lec_err(u32 psr)
return psr && (psr != LEC_UNUSED);
}
+static inline bool m_can_is_protocol_err(u32 irqstatus)
+{
+ return irqstatus & IR_ERR_LEC_31X;
+}
+
+static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus)
+{
+ struct net_device_stats *stats = &dev->stats;
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(dev, &cf);
+
+ /* update tx error stats since there is protocol error */
+ stats->tx_errors++;
+
+ /* update arbitration lost status */
+ if (cdev->version >= 31 && (irqstatus & IR_PEA)) {
+ netdev_dbg(dev, "Protocol error in arbitration phase\n");
+ cdev->can.can_stats.arbitration_lost++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC;
+ }
+ }
+
+ if (unlikely(!skb)) {
+ netdev_dbg(dev, "allocation of skb failed\n");
+ return 0;
+ }
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
u32 psr)
{
@@ -791,6 +829,11 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
is_lec_err(psr))
work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);
+ /* handle protocol errors in arbitration phase */
+ if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+ m_can_is_protocol_err(irqstatus))
+ work_done += m_can_handle_protocol_error(dev, irqstatus);
+
/* other unprocessed error interrupts */
m_can_handle_other_err(dev, irqstatus);
@@ -1135,7 +1178,7 @@ static void m_can_chip_config(struct net_device *dev)
if (cdev->version == 30) {
/* Version 3.0.x */
- cccr &= ~(CCCR_TEST | CCCR_MON |
+ cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR |
(CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
(CCCR_CME_MASK << CCCR_CME_SHIFT));
@@ -1145,7 +1188,7 @@ static void m_can_chip_config(struct net_device *dev)
} else {
/* Version 3.1.x or 3.2.x */
cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
- CCCR_NISO);
+ CCCR_NISO | CCCR_DAR);
/* Only 3.2.x has NISO Bit implemented */
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1165,6 +1208,10 @@ static void m_can_chip_config(struct net_device *dev)
if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
cccr |= CCCR_MON;
+ /* Disable Auto Retransmission (all versions) */
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ cccr |= CCCR_DAR;
+
/* Write config */
m_can_write(cdev, M_CAN_CCCR, cccr);
m_can_write(cdev, M_CAN_TEST, test);
@@ -1310,7 +1357,8 @@ static int m_can_dev_setup(struct m_can_classdev *m_can_dev)
m_can_dev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
- CAN_CTRLMODE_FD;
+ CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_ONE_SHOT;
/* Set properties depending on M_CAN version */
switch (m_can_dev->version) {
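With CCCR_DAR wired up, one-shot becomes a regular control mode that userspace can request per interface. A sketch using libsocketcan, assuming that library is available; the interface name "can0" is illustrative:

#include <libsocketcan.h>
#include <linux/can/netlink.h>

/* Sketch: ask the driver to set CCCR_DAR by enabling the
 * generic one-shot control mode.
 */
int example_enable_one_shot(void)
{
	struct can_ctrlmode cm = {
		.mask  = CAN_CTRLMODE_ONE_SHOT,
		.flags = CAN_CTRLMODE_ONE_SHOT,
	};

	return can_set_ctrlmode("can0", &cm);
}

The same flag can usually be toggled from the shell with ip-link's "one-shot on" option.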
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 6b0c6a99fc8d..10aa3e457c33 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
+/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
* Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Copyright (C) 2016 PEAK System-Technik GmbH
@@ -122,7 +121,8 @@ static int pucan_set_timing_slow(struct peak_canfd_priv *priv,
cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW);
cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->sjw - 1,
- priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES);
+ priv->can.ctrlmode &
+ CAN_CTRLMODE_3_SAMPLES);
cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->phase_seg2 - 1);
cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->brp - 1));
@@ -232,6 +232,20 @@ static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv)
return pucan_write_cmd(priv);
}
+static int pucan_netif_rx(struct sk_buff *skb, __le32 ts_low, __le32 ts_high)
+{
+ struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
+ u64 ts_us;
+
+ ts_us = (u64)le32_to_cpu(ts_high) << 32;
+ ts_us |= le32_to_cpu(ts_low);
+
+ /* IP core timestamps are µs. */
+ hwts->hwtstamp = ns_to_ktime(ts_us * NSEC_PER_USEC);
+
+ return netif_rx(skb);
+}
+
/* handle the reception of one CAN frame */
static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
struct pucan_rx_msg *msg)
@@ -299,7 +313,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
stats->rx_bytes += cf->len;
stats->rx_packets++;
- netif_rx(skb);
+ pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
return 0;
}
@@ -325,7 +339,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
/* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */
if (pucan_status_is_rx_barrier(msg)) {
-
if (priv->enable_tx_path) {
int err = priv->enable_tx_path(priv);
@@ -393,7 +406,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
+ pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
return 0;
}
diff --git a/drivers/net/can/peak_canfd/peak_canfd_user.h b/drivers/net/can/peak_canfd/peak_canfd_user.h
index 95b23caa7dd6..a72719dc3b74 100644
--- a/drivers/net/can/peak_canfd/peak_canfd_user.h
+++ b/drivers/net/can/peak_canfd/peak_canfd_user.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * CAN driver for PEAK System micro-CAN based adapters
+/* CAN driver for PEAK System micro-CAN based adapters
*
* Copyright (C) 2003-2011 PEAK System-Technik GmbH
* Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 13b10cbf236a..d08a3d559114 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
+/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
* Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Derived from the PCAN project file driver/src/pcan_pci.c:
@@ -841,7 +840,8 @@ err_disable_pci:
/* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
* the probe() function must return a negative errno in case of failure
- * (err is unchanged if negative) */
+ * (err is unchanged if negative)
+ */
return pcibios_err_to_errno(err);
}
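The reworded comment documents a classic trap: PCI config-space accessors return positive PCIBIOS_* codes, while probe() must return negative errnos. A short sketch of the conversion, using an arbitrary config read:

#include <linux/pci.h>

/* Sketch: translate a positive PCIBIOS_* status into a negative
 * errno; already-negative values pass through unchanged.
 */
static int example_read_subsys(struct pci_dev *pdev, u16 *sub)
{
	int err;

	err = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, sub);
	if (err)
		return pcibios_err_to_errno(err);

	return 0;
}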
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index bf5adea9c0a3..48575900adb7 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -744,7 +744,6 @@ static int rcar_can_probe(struct platform_device *pdev)
{
struct rcar_can_priv *priv;
struct net_device *ndev;
- struct resource *mem;
void __iomem *addr;
u32 clock_select = CLKR_CLKP1;
int err = -ENODEV;
@@ -759,8 +758,7 @@ static int rcar_can_probe(struct platform_device *pdev)
goto fail;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, mem);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
err = PTR_ERR(addr);
goto fail;
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index edaa1ca972c1..de59dd6aad29 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1630,7 +1630,6 @@ static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch)
static int rcar_canfd_probe(struct platform_device *pdev)
{
- struct resource *mem;
void __iomem *addr;
u32 sts, ch, fcan_freq;
struct rcar_canfd_global *gpriv;
@@ -1704,8 +1703,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* CANFD clock is further divided by (1/2) within the IP */
fcan_freq /= 2;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, mem);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
err = PTR_ERR(addr);
goto fail_dev;
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index e6a668ee7730..e8328910a234 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014 David Jander, Protonic Holland
- * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+/* Copyright (c) 2014 Protonic Holland,
+ * David Jander
+ * Copyright (C) 2014-2017 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
*/
#include <linux/can/dev.h>
@@ -11,14 +12,17 @@ struct can_rx_offload_cb {
u32 timestamp;
};
-static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
+static inline struct can_rx_offload_cb *
+can_rx_offload_get_cb(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
return (struct can_rx_offload_cb *)skb->cb;
}
-static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
+static inline bool
+can_rx_offload_le(struct can_rx_offload *offload,
+ unsigned int a, unsigned int b)
{
if (offload->inc)
return a <= b;
@@ -26,7 +30,8 @@ static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned in
return a >= b;
}
-static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
+static inline unsigned int
+can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
if (offload->inc)
return (*val)++;
@@ -36,7 +41,9 @@ static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, un
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
- struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
+ struct can_rx_offload *offload = container_of(napi,
+ struct can_rx_offload,
+ napi);
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
@@ -65,8 +72,9 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
return work_done;
}
-static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
- int (*compare)(struct sk_buff *a, struct sk_buff *b))
+static inline void
+__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
+ int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
struct sk_buff *pos, *insert = NULL;
@@ -101,47 +109,70 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
cb_a = can_rx_offload_get_cb(a);
cb_b = can_rx_offload_get_cb(b);
- /* Substract two u32 and return result as int, to keep
+ /* Subtract two u32 and return result as int, to keep
* difference steady around the u32 overflow.
*/
return cb_b->timestamp - cb_a->timestamp;
}
-static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
+/**
+ * can_rx_offload_offload_one() - Read one CAN frame from HW
+ * @offload: pointer to rx_offload context
+ * @n: number of mailbox to read
+ *
+ * The task of this function is to read a CAN frame from mailbox @n
+ * from the device and return the mailbox's content as a struct
+ * sk_buff.
+ *
+ * If the struct can_rx_offload::skb_queue exceeds the maximal queue
+ * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
+ * allocated, the mailbox contents is discarded by reading it into an
+ * overflow buffer. This way the mailbox is marked as free by the
+ * driver.
+ *
+ * Return: A pointer to skb containing the CAN frame on success.
+ *
+ * NULL if the mailbox @n is empty.
+ *
+ * ERR_PTR() in case of an error.
+ */
+static struct sk_buff *
+can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
struct can_rx_offload_cb *cb;
- struct can_frame *cf;
- int ret;
+ bool drop = false;
+ u32 timestamp;
- /* If queue is full or skb not available, read to discard mailbox */
- if (likely(skb_queue_len(&offload->skb_queue) <=
- offload->skb_queue_len_max))
- skb = alloc_can_skb(offload->dev, &cf);
+ /* If queue is full, drop the frame */
+ if (unlikely(skb_queue_len(&offload->skb_queue) >
+ offload->skb_queue_len_max))
+ drop = true;
- if (!skb) {
- struct can_frame cf_overflow;
- u32 timestamp;
+ skb = offload->mailbox_read(offload, n, &timestamp, drop);
+ /* Mailbox was empty. */
+ if (unlikely(!skb))
+ return NULL;
- ret = offload->mailbox_read(offload, &cf_overflow,
- &timestamp, n);
- if (ret)
- offload->dev->stats.rx_dropped++;
+ /* There was a problem reading the mailbox, propagate
+ * error value.
+ */
+ if (unlikely(IS_ERR(skb))) {
+ offload->dev->stats.rx_dropped++;
+ offload->dev->stats.rx_fifo_errors++;
- return NULL;
+ return skb;
}
+ /* Mailbox was read. */
cb = can_rx_offload_get_cb(skb);
- ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
- if (!ret) {
- kfree_skb(skb);
- return NULL;
- }
+ cb->timestamp = timestamp;
return skb;
}
-int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
+int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
+ u64 pending)
{
struct sk_buff_head skb_queue;
unsigned int i;
@@ -157,8 +188,8 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pen
continue;
skb = can_rx_offload_offload_one(offload, i);
- if (!skb)
- break;
+ if (IS_ERR_OR_NULL(skb))
+ continue;
__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
}
@@ -171,8 +202,8 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pen
skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
- if ((queue_len = skb_queue_len(&offload->skb_queue)) >
- (offload->skb_queue_len_max / 8))
+ queue_len = skb_queue_len(&offload->skb_queue);
+ if (queue_len > offload->skb_queue_len_max / 8)
netdev_dbg(offload->dev, "%s: queue_len=%d\n",
__func__, queue_len);
@@ -188,7 +219,13 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
struct sk_buff *skb;
int received = 0;
- while ((skb = can_rx_offload_offload_one(offload, 0))) {
+ while (1) {
+ skb = can_rx_offload_offload_one(offload, 0);
+ if (IS_ERR(skb))
+ continue;
+ if (!skb)
+ break;
+
skb_queue_tail(&offload->skb_queue, skb);
received++;
}
@@ -207,8 +244,10 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
unsigned long flags;
if (skb_queue_len(&offload->skb_queue) >
- offload->skb_queue_len_max)
- return -ENOMEM;
+ offload->skb_queue_len_max) {
+ kfree_skb(skb);
+ return -ENOBUFS;
+ }
cb = can_rx_offload_get_cb(skb);
cb->timestamp = timestamp;
@@ -250,8 +289,10 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb)
{
if (skb_queue_len(&offload->skb_queue) >
- offload->skb_queue_len_max)
- return -ENOMEM;
+ offload->skb_queue_len_max) {
+ kfree_skb(skb);
+ return -ENOBUFS;
+ }
skb_queue_tail(&offload->skb_queue, skb);
can_rx_offload_schedule(offload);
@@ -260,7 +301,9 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
-static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+static int can_rx_offload_init_queue(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight)
{
offload->dev = dev;
@@ -269,7 +312,6 @@ static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offlo
offload->skb_queue_len_max *= 4;
skb_queue_head_init(&offload->skb_queue);
- can_rx_offload_reset(offload);
netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
@@ -278,7 +320,8 @@ static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offlo
return 0;
}
-int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
+int can_rx_offload_add_timestamp(struct net_device *dev,
+ struct can_rx_offload *offload)
{
unsigned int weight;
@@ -298,7 +341,8 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
-int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+int can_rx_offload_add_fifo(struct net_device *dev,
+ struct can_rx_offload *offload, unsigned int weight)
{
if (!offload->mailbox_read)
return -EINVAL;
@@ -309,7 +353,6 @@ EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
void can_rx_offload_enable(struct can_rx_offload *offload)
{
- can_rx_offload_reset(offload);
napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);
@@ -320,8 +363,3 @@ void can_rx_offload_del(struct can_rx_offload *offload)
skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);
-
-void can_rx_offload_reset(struct can_rx_offload *offload)
-{
-}
-EXPORT_SYMBOL_GPL(can_rx_offload_reset);
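The reworked rx-offload API moves skb allocation into the driver callback, which now reports three outcomes: NULL for an empty mailbox, ERR_PTR() for a frame that must be discarded (the mailbox still gets marked read), and a valid skb otherwise. A minimal sketch of a conforming callback; the example_hw_*() helpers are hypothetical:

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

static struct sk_buff *
example_mailbox_read(struct can_rx_offload *offload, unsigned int n,
		     u32 *timestamp, bool drop)
{
	struct can_frame *cf;
	struct sk_buff *skb;

	/* rx-offload's queue is full: discard the frame, but still
	 * free the mailbox below.
	 */
	if (unlikely(drop)) {
		skb = ERR_PTR(-ENOBUFS);
		goto mark_as_read;
	}

	if (!example_hw_mb_pending(n))		/* hypothetical */
		return NULL;			/* mailbox empty */

	skb = alloc_can_skb(offload->dev, &cf);
	if (unlikely(!skb)) {
		skb = ERR_PTR(-ENOMEM);
		goto mark_as_read;
	}

	example_hw_read_frame(n, cf, timestamp);	/* hypothetical */

mark_as_read:
	example_hw_ack_mb(n);				/* hypothetical */
	return skb;
}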
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index bb6032211043..0a9f42e5fedf 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -617,6 +617,7 @@ err_free_chan:
sl->tty = NULL;
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
+ free_netdev(sl->dev);
err_exit:
rtnl_unlock();
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index bee9f7b8dad6..5009ff294941 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -22,7 +22,6 @@
#include <linux/can/core.h>
#include <linux/can/dev.h>
#include <linux/can/led.h>
-#include <linux/can/platform/mcp251x.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -321,6 +320,18 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
mcp251x_spi_trans(spi, 3);
}
+static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
+{
+ struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+ priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
+ priv->spi_tx_buf[1] = reg;
+ priv->spi_tx_buf[2] = v1;
+ priv->spi_tx_buf[3] = v2;
+
+ mcp251x_spi_trans(spi, 4);
+}
+
static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
u8 mask, u8 val)
{
@@ -457,6 +468,39 @@ static void mcp251x_hw_sleep(struct spi_device *spi)
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
}
+/* May only be called when device is sleeping! */
+static int mcp251x_hw_wake(struct spi_device *spi)
+{
+ unsigned long timeout;
+
+ /* Force wakeup interrupt to wake device, but don't execute IST */
+ disable_irq(spi->irq);
+ mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF);
+
+ /* Wait for oscillator startup timer after wake up */
+ mdelay(MCP251X_OST_DELAY_MS);
+
+ /* Put device into config mode */
+ mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_CONF);
+
+ /* Wait for the device to enter config mode */
+ timeout = jiffies + HZ;
+ while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
+ CANCTRL_REQOP_CONF) {
+ schedule();
+ if (time_after(jiffies, timeout)) {
+ dev_err(&spi->dev, "MCP251x didn't enter config mode\n");
+ return -EBUSY;
+ }
+ }
+
+ /* Disable and clear pending interrupts */
+ mcp251x_write_2regs(spi, CANINTE, 0x00, 0x00);
+ enable_irq(spi->irq);
+
+ return 0;
+}
+
static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
struct net_device *net)
{
@@ -646,8 +690,7 @@ static int mcp251x_stop(struct net_device *net)
mutex_lock(&priv->mcp_lock);
/* Disable and clear pending interrupts */
- mcp251x_write_reg(spi, CANINTE, 0x00);
- mcp251x_write_reg(spi, CANINTF, 0x00);
+ mcp251x_write_2regs(spi, CANINTE, 0x00, 0x00);
mcp251x_write_reg(spi, TXBCTRL(0), 0);
mcp251x_clean(net);
@@ -715,8 +758,13 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
mutex_lock(&priv->mcp_lock);
if (priv->after_suspend) {
- mcp251x_hw_reset(spi);
- mcp251x_setup(net, spi);
+ if (priv->after_suspend & AFTER_SUSPEND_POWER) {
+ mcp251x_hw_reset(spi);
+ mcp251x_setup(net, spi);
+ } else {
+ mcp251x_hw_wake(spi);
+ }
+ priv->force_quit = 0;
if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
mcp251x_set_normal_mode(spi);
} else if (priv->after_suspend & AFTER_SUSPEND_UP) {
@@ -728,7 +776,6 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
mcp251x_hw_sleep(spi);
}
priv->after_suspend = 0;
- priv->force_quit = 0;
}
if (priv->restart_tx) {
@@ -913,7 +960,7 @@ static int mcp251x_open(struct net_device *net)
INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
- ret = mcp251x_hw_reset(spi);
+ ret = mcp251x_hw_wake(spi);
if (ret)
goto out_free_wq;
ret = mcp251x_setup(net, spi);
@@ -986,19 +1033,19 @@ MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
static int mcp251x_can_probe(struct spi_device *spi)
{
const void *match = device_get_match_data(&spi->dev);
- struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
struct net_device *net;
struct mcp251x_priv *priv;
struct clk *clk;
- int freq, ret;
+ u32 freq;
+ int ret;
clk = devm_clk_get_optional(&spi->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
freq = clk_get_rate(clk);
- if (freq == 0 && pdata)
- freq = pdata->oscillator_frequency;
+ if (freq == 0)
+ device_property_read_u32(&spi->dev, "clock-frequency", &freq);
/* Sanity check */
if (freq < 1000000 || freq > 25000000)
@@ -1155,13 +1202,13 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
if (priv->after_suspend & AFTER_SUSPEND_POWER)
mcp251x_power_enable(priv->power, 1);
-
- if (priv->after_suspend & AFTER_SUSPEND_UP) {
+ if (priv->after_suspend & AFTER_SUSPEND_UP)
mcp251x_power_enable(priv->transceiver, 1);
+
+ if (priv->after_suspend & (AFTER_SUSPEND_POWER | AFTER_SUSPEND_UP))
queue_work(priv->wq, &priv->restart_work);
- } else {
+ else
priv->after_suspend = 0;
- }
priv->force_quit = 0;
enable_irq(spi->irq);
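Dropping the mcp251x platform-data header means the oscillator frequency now comes from the firmware node; device_property_read_u32() keeps this working for both DT and ACPI systems. A condensed sketch of the fallback logic:

#include <linux/clk.h>
#include <linux/property.h>

/* Sketch: prefer a real clock provider, fall back to the node's
 * "clock-frequency" property when no clock is described.
 */
static u32 example_get_osc_freq(struct device *dev, struct clk *clk)
{
	u32 freq = clk_get_rate(clk);	/* 0 if clk is NULL */

	if (freq == 0)
		device_property_read_u32(dev, "clock-frequency", &freq);

	return freq;
}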
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index f4cd88196404..e3ba8ab0cbf4 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -771,7 +771,6 @@ static int sun4ican_remove(struct platform_device *pdev)
static int sun4ican_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *mem;
struct clk *clk;
void __iomem *addr;
int err, irq;
@@ -791,8 +790,7 @@ static int sun4ican_probe(struct platform_device *pdev)
goto exit;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, mem);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
err = -EBUSY;
goto exit;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f8b19eef5d26..94b1491b569f 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -73,6 +73,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
*/
#define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
#define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1)
+#define HECC_RX_LAST_MBOX (HECC_MAX_TX_MBOX)
/* TI HECC module registers */
#define HECC_CANME 0x0 /* Mailbox enable */
@@ -82,7 +83,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
#define HECC_CANTA 0x10 /* Transmission acknowledge */
#define HECC_CANAA 0x14 /* Abort acknowledge */
#define HECC_CANRMP 0x18 /* Receive message pending */
-#define HECC_CANRML 0x1C /* Remote message lost */
+#define HECC_CANRML 0x1C /* Receive message lost */
#define HECC_CANRFP 0x20 /* Remote frame pending */
#define HECC_CANGAM 0x24 /* SECC only: Global acceptance mask */
#define HECC_CANMC 0x28 /* Master control */
@@ -149,6 +150,8 @@ MODULE_VERSION(HECC_MODULE_VERSION);
#define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\
HECC_CANES_CRCE | HECC_CANES_SE |\
HECC_CANES_ACKE)
+#define HECC_CANES_FLAGS (HECC_BUS_ERROR | HECC_CANES_BO |\
+ HECC_CANES_EP | HECC_CANES_EW)
#define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */
@@ -382,8 +385,18 @@ static void ti_hecc_start(struct net_device *ndev)
hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
}
- /* Prevent message over-write & Enable interrupts */
- hecc_write(priv, HECC_CANOPC, HECC_SET_REG);
+ /* Enable tx interrupts */
+ hecc_set_bit(priv, HECC_CANMIM, BIT(HECC_MAX_TX_MBOX) - 1);
+
+ /* Prevent message over-write to create an RX FIFO, but not for
+ * the lowest priority mailbox, since that allows detecting
+ * overflows instead of the hardware silently dropping the
+ * messages.
+ */
+ mbx_mask = ~BIT(HECC_RX_LAST_MBOX);
+ hecc_write(priv, HECC_CANOPC, mbx_mask);
+
+ /* Enable interrupts */
if (priv->use_hecc1int) {
hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
@@ -400,6 +413,9 @@ static void ti_hecc_stop(struct net_device *ndev)
{
struct ti_hecc_priv *priv = netdev_priv(ndev);
+ /* Disable the CPK; stop sending, erroring and acking */
+ hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
+
/* Disable interrupts and disable mailboxes */
hecc_write(priv, HECC_CANGIM, 0);
hecc_write(priv, HECC_CANMIM, 0);
@@ -508,8 +524,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
hecc_set_bit(priv, HECC_CANME, mbx_mask);
spin_unlock_irqrestore(&priv->mbx_lock, flags);
- hecc_clear_bit(priv, HECC_CANMD, mbx_mask);
- hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
hecc_write(priv, HECC_CANTRS, mbx_mask);
return NETDEV_TX_OK;
@@ -521,12 +535,27 @@ struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload)
return container_of(offload, struct ti_hecc_priv, offload);
}
-static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
- struct can_frame *cf,
- u32 *timestamp, unsigned int mbxno)
+static struct sk_buff *ti_hecc_mailbox_read(struct can_rx_offload *offload,
+ unsigned int mbxno, u32 *timestamp,
+ bool drop)
{
struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
- u32 data;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ u32 data, mbx_mask;
+
+ mbx_mask = BIT(mbxno);
+
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
+ skb = alloc_can_skb(offload->dev, &cf);
+ if (unlikely(!skb)) {
+ skb = ERR_PTR(-ENOMEM);
+ goto mark_as_read;
+ }
data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
if (data & HECC_CANMID_IDE)
@@ -548,7 +577,26 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
*timestamp = hecc_read_stamp(priv, mbxno);
- return 1;
+ /* Check for FIFO overrun.
+ *
+ * All but the last RX mailbox have activated overwrite
+ * protection. So skip the overrun check if we're not
+ * handling the last RX mailbox.
+ *
+ * As the overwrite protection for the last RX mailbox is
+ * disabled, the CAN core might update the mailbox while we're
+ * reading it. This means the skb might be inconsistent.
+ *
+ * Return an error to let rx-offload discard this CAN frame.
+ */
+ if (unlikely(mbxno == HECC_RX_LAST_MBOX &&
+ hecc_read(priv, HECC_CANRML) & mbx_mask))
+ skb = ERR_PTR(-ENOBUFS);
+
+ mark_as_read:
+ hecc_write(priv, HECC_CANRMP, mbx_mask);
+
+ return skb;
}
static int ti_hecc_error(struct net_device *ndev, int int_status,
@@ -558,92 +606,73 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
struct can_frame *cf;
struct sk_buff *skb;
u32 timestamp;
+ int err;
- /* propagate the error condition to the can stack */
- skb = alloc_can_err_skb(ndev, &cf);
- if (!skb) {
- if (printk_ratelimit())
- netdev_err(priv->ndev,
- "%s: alloc_can_err_skb() failed\n",
- __func__);
- return -ENOMEM;
- }
-
- if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
- if ((int_status & HECC_CANGIF_BOIF) == 0) {
- priv->can.state = CAN_STATE_ERROR_WARNING;
- ++priv->can.can_stats.error_warning;
- cf->can_id |= CAN_ERR_CRTL;
- if (hecc_read(priv, HECC_CANTEC) > 96)
- cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
- if (hecc_read(priv, HECC_CANREC) > 96)
- cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
- }
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW);
- netdev_dbg(priv->ndev, "Error Warning interrupt\n");
- hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
- }
-
- if (int_status & HECC_CANGIF_EPIF) { /* error passive int */
- if ((int_status & HECC_CANGIF_BOIF) == 0) {
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- ++priv->can.can_stats.error_passive;
- cf->can_id |= CAN_ERR_CRTL;
- if (hecc_read(priv, HECC_CANTEC) > 127)
- cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
- if (hecc_read(priv, HECC_CANREC) > 127)
- cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+ if (err_status & HECC_BUS_ERROR) {
+ /* propagate the error condition to the can stack */
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb) {
+ if (net_ratelimit())
+ netdev_err(priv->ndev,
+ "%s: alloc_can_err_skb() failed\n",
+ __func__);
+ return -ENOMEM;
}
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP);
- netdev_dbg(priv->ndev, "Error passive interrupt\n");
- hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
- }
- /* Need to check busoff condition in error status register too to
- * ensure warning interrupts don't hog the system
- */
- if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
- priv->can.state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO);
- hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
- /* Disable all interrupts in bus-off to avoid int hog */
- hecc_write(priv, HECC_CANGIM, 0);
- ++priv->can.can_stats.bus_off;
- can_bus_off(ndev);
- }
-
- if (err_status & HECC_BUS_ERROR) {
++priv->can.can_stats.bus_error;
cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
- if (err_status & HECC_CANES_FE) {
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
+ if (err_status & HECC_CANES_FE)
cf->data[2] |= CAN_ERR_PROT_FORM;
- }
- if (err_status & HECC_CANES_BE) {
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE);
+ if (err_status & HECC_CANES_BE)
cf->data[2] |= CAN_ERR_PROT_BIT;
- }
- if (err_status & HECC_CANES_SE) {
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE);
+ if (err_status & HECC_CANES_SE)
cf->data[2] |= CAN_ERR_PROT_STUFF;
- }
- if (err_status & HECC_CANES_CRCE) {
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
+ if (err_status & HECC_CANES_CRCE)
cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
- }
- if (err_status & HECC_CANES_ACKE) {
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
+ if (err_status & HECC_CANES_ACKE)
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
- }
+
+ timestamp = hecc_read(priv, HECC_CANLNT);
+ err = can_rx_offload_queue_sorted(&priv->offload, skb,
+ timestamp);
+ if (err)
+ ndev->stats.rx_fifo_errors++;
}
- timestamp = hecc_read(priv, HECC_CANLNT);
- can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ hecc_write(priv, HECC_CANES, HECC_CANES_FLAGS);
return 0;
}
+static void ti_hecc_change_state(struct net_device *ndev,
+ enum can_state rx_state,
+ enum can_state tx_state)
+{
+ struct ti_hecc_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 timestamp;
+ int err;
+
+ skb = alloc_can_err_skb(priv->ndev, &cf);
+ if (unlikely(!skb)) {
+ priv->can.state = max(tx_state, rx_state);
+ return;
+ }
+
+ can_change_state(priv->ndev, cf, tx_state, rx_state);
+
+ if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) {
+ cf->data[6] = hecc_read(priv, HECC_CANTEC);
+ cf->data[7] = hecc_read(priv, HECC_CANREC);
+ }
+
+ timestamp = hecc_read(priv, HECC_CANLNT);
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ if (err)
+ ndev->stats.rx_fifo_errors++;
+}
+
static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = (struct net_device *)dev_id;
@@ -651,6 +680,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
struct net_device_stats *stats = &ndev->stats;
u32 mbxno, mbx_mask, int_status, err_status, stamp;
unsigned long flags, rx_pending;
+ u32 handled = 0;
int_status = hecc_read(priv,
priv->use_hecc1int ?
@@ -660,17 +690,66 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
return IRQ_NONE;
err_status = hecc_read(priv, HECC_CANES);
- if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO |
- HECC_CANES_EP | HECC_CANES_EW))
+ if (unlikely(err_status & HECC_CANES_FLAGS))
ti_hecc_error(ndev, int_status, err_status);
+ if (unlikely(int_status & HECC_CANGIM_DEF_MASK)) {
+ enum can_state rx_state, tx_state;
+ u32 rec = hecc_read(priv, HECC_CANREC);
+ u32 tec = hecc_read(priv, HECC_CANTEC);
+
+ if (int_status & HECC_CANGIF_WLIF) {
+ handled |= HECC_CANGIF_WLIF;
+ rx_state = rec >= tec ? CAN_STATE_ERROR_WARNING : 0;
+ tx_state = rec <= tec ? CAN_STATE_ERROR_WARNING : 0;
+ netdev_dbg(priv->ndev, "Error Warning interrupt\n");
+ ti_hecc_change_state(ndev, rx_state, tx_state);
+ }
+
+ if (int_status & HECC_CANGIF_EPIF) {
+ handled |= HECC_CANGIF_EPIF;
+ rx_state = rec >= tec ? CAN_STATE_ERROR_PASSIVE : 0;
+ tx_state = rec <= tec ? CAN_STATE_ERROR_PASSIVE : 0;
+ netdev_dbg(priv->ndev, "Error passive interrupt\n");
+ ti_hecc_change_state(ndev, rx_state, tx_state);
+ }
+
+ if (int_status & HECC_CANGIF_BOIF) {
+ handled |= HECC_CANGIF_BOIF;
+ rx_state = CAN_STATE_BUS_OFF;
+ tx_state = CAN_STATE_BUS_OFF;
+ netdev_dbg(priv->ndev, "Bus off interrupt\n");
+
+ /* Disable all interrupts */
+ hecc_write(priv, HECC_CANGIM, 0);
+ can_bus_off(ndev);
+ ti_hecc_change_state(ndev, rx_state, tx_state);
+ }
+ } else if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
+ enum can_state new_state, tx_state, rx_state;
+ u32 rec = hecc_read(priv, HECC_CANREC);
+ u32 tec = hecc_read(priv, HECC_CANTEC);
+
+ if (rec >= 128 || tec >= 128)
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (rec >= 96 || tec >= 96)
+ new_state = CAN_STATE_ERROR_WARNING;
+ else
+ new_state = CAN_STATE_ERROR_ACTIVE;
+
+ if (new_state < priv->can.state) {
+ rx_state = rec >= tec ? new_state : 0;
+ tx_state = rec <= tec ? new_state : 0;
+ ti_hecc_change_state(ndev, rx_state, tx_state);
+ }
+ }
+
if (int_status & HECC_CANGIF_GMIF) {
while (priv->tx_tail - priv->tx_head > 0) {
mbxno = get_tx_tail_mb(priv);
mbx_mask = BIT(mbxno);
if (!(mbx_mask & hecc_read(priv, HECC_CANTA)))
break;
- hecc_clear_bit(priv, HECC_CANMIM, mbx_mask);
hecc_write(priv, HECC_CANTA, mbx_mask);
spin_lock_irqsave(&priv->mbx_lock, flags);
hecc_clear_bit(priv, HECC_CANME, mbx_mask);
@@ -695,16 +774,15 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
while ((rx_pending = hecc_read(priv, HECC_CANRMP))) {
can_rx_offload_irq_offload_timestamp(&priv->offload,
rx_pending);
- hecc_write(priv, HECC_CANRMP, rx_pending);
}
}
/* clear all interrupt conditions - read back to avoid spurious ints */
if (priv->use_hecc1int) {
- hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
+ hecc_write(priv, HECC_CANGIF1, handled);
int_status = hecc_read(priv, HECC_CANGIF1);
} else {
- hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
+ hecc_write(priv, HECC_CANGIF0, handled);
int_status = hecc_read(priv, HECC_CANGIF0);
}
@@ -877,7 +955,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->offload.mailbox_read = ti_hecc_mailbox_read;
priv->offload.mb_first = HECC_RX_FIRST_MBOX;
- priv->offload.mb_last = HECC_MAX_TX_MBOX;
+ priv->offload.mb_last = HECC_RX_LAST_MBOX;
err = can_rx_offload_add_timestamp(ndev, &priv->offload);
if (err) {
dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index bd6eb9967630..2f74f6704c12 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -623,6 +623,7 @@ static int gs_can_open(struct net_device *netdev)
rc);
usb_unanchor_urb(urb);
+ usb_free_urb(urb);
break;
}
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 19a702ac49e4..21faa2ec4632 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -876,9 +876,8 @@ static void mcba_usb_disconnect(struct usb_interface *intf)
netdev_info(priv->netdev, "device disconnected\n");
unregister_candev(priv->netdev);
- free_candev(priv->netdev);
-
mcba_urb_unlink(priv);
+ free_candev(priv->netdev);
}
static struct usb_driver mcba_usb_driver = {
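This hunk and the usb_8dev one further down fix the same use-after-free: the private data lives inside the candev, so URBs must be unlinked before free_candev() releases it. A sketch of the safe disconnect order; the example_* names are hypothetical:

#include <linux/can/dev.h>
#include <linux/usb.h>

struct example_priv {
	struct net_device *netdev;
	/* ... anchored URBs etc. ... */
};

static void example_usb_disconnect(struct usb_interface *intf)
{
	struct example_priv *priv = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	if (!priv)
		return;

	unregister_candev(priv->netdev);	/* stop the netdev first */
	example_unlink_all_urbs(priv);		/* priv must still exist */
	free_candev(priv->netdev);		/* free last: priv lives here */
}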
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 617da295b6c1..d2539c95adb6 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -100,7 +100,7 @@ struct pcan_usb_msg_context {
u8 *end;
u8 rec_cnt;
u8 rec_idx;
- u8 rec_data_idx;
+ u8 rec_ts_idx;
struct net_device *netdev;
struct pcan_usb *pdev;
};
@@ -436,8 +436,8 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
}
if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) {
/* no error (back to active state) */
- mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE;
- return 0;
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ break;
}
break;
@@ -460,9 +460,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
}
if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) {
- /* no error (back to active state) */
- mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE;
- return 0;
+ /* no error (back to warning state) */
+ new_state = CAN_STATE_ERROR_WARNING;
+ break;
}
break;
@@ -501,6 +501,11 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
mc->pdev->dev.can.can_stats.error_warning++;
break;
+ case CAN_STATE_ERROR_ACTIVE:
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ break;
+
default:
/* CAN_STATE_MAX (trick to handle other errors) */
cf->can_id |= CAN_ERR_CRTL;
@@ -547,10 +552,15 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc,
mc->ptr += PCAN_USB_CMD_ARGS;
if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
- int err = pcan_usb_decode_ts(mc, !mc->rec_idx);
+ int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx);
if (err)
return err;
+
+ /* Next packet in the buffer will have a timestamp on a single
+ * byte
+ */
+ mc->rec_ts_idx++;
}
switch (f) {
@@ -632,10 +642,13 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
cf->can_dlc = get_can_dlc(rec_len);
- /* first data packet timestamp is a word */
- if (pcan_usb_decode_ts(mc, !mc->rec_data_idx))
+ /* Only first packet timestamp is a word */
+ if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx))
goto decode_failed;
+ /* Next packet in the buffer will have a timestamp on a single byte */
+ mc->rec_ts_idx++;
+
/* read data */
memset(cf->data, 0x0, sizeof(cf->data));
if (status_len & PCAN_USB_STATUSLEN_RTR) {
@@ -688,7 +701,6 @@ static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf)
/* handle normal can frames here */
} else {
err = pcan_usb_decode_data(&mc, sl);
- mc.rec_data_idx++;
}
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 65dce642b86b..0b7766b715fd 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -750,7 +750,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
dev = netdev_priv(netdev);
/* allocate a buffer large enough to send commands */
- dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
+ dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
if (!dev->cmd_buf) {
err = -ENOMEM;
goto lbl_free_candev;
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index d596a2ad7f78..8fa224b28218 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -996,9 +996,8 @@ static void usb_8dev_disconnect(struct usb_interface *intf)
netdev_info(priv->netdev, "device disconnected\n");
unregister_netdev(priv->netdev);
- free_candev(priv->netdev);
-
unlink_all_urbs(priv);
+ free_candev(priv->netdev);
}
}
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 911b34316c9d..4a96e2dd7d77 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -194,7 +194,7 @@ struct xcan_devtype_data {
*/
struct xcan_priv {
struct can_priv can;
- spinlock_t tx_lock;
+ spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
unsigned int tx_head;
unsigned int tx_tail;
unsigned int tx_max;
@@ -400,7 +400,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
XCAN_SR_CONFIG_MASK;
if (!is_config_mode) {
netdev_alert(ndev,
- "BUG! Cannot set bittiming - CAN is not in config mode\n");
+ "BUG! Cannot set bittiming - CAN is not in config mode\n");
return -EPERM;
}
@@ -470,7 +470,13 @@ static int xcan_chip_start(struct net_device *ndev)
if (err < 0)
return err;
- /* Enable interrupts */
+ /* Enable interrupts
+ *
+ * We enable the ERROR interrupt even with
+ * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
+ * dedicated interrupt for a state change to
+ * ERROR_WARNING/ERROR_PASSIVE.
+ */
ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
@@ -482,11 +488,10 @@ static int xcan_chip_start(struct net_device *ndev)
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
/* Check whether it is loopback mode or normal mode */
- if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
reg_msr = XCAN_MSR_LBACK_MASK;
- } else {
+ else
reg_msr = 0x0;
- }
/* enable the first extended filter, if any, as cores with extended
* filtering default to non-receipt if all filters are disabled
@@ -981,12 +986,9 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
+ struct can_frame cf = { };
u32 err_status;
- skb = alloc_can_err_skb(ndev, &cf);
-
err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
@@ -996,32 +998,27 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
/* Leave device in Config Mode in bus-off state */
priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
can_bus_off(ndev);
- if (skb)
- cf->can_id |= CAN_ERR_BUSOFF;
+ cf.can_id |= CAN_ERR_BUSOFF;
} else {
enum can_state new_state = xcan_current_error_state(ndev);
if (new_state != priv->can.state)
- xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+ xcan_set_error_state(ndev, new_state, &cf);
}
/* Check for Arbitration lost interrupt */
if (isr & XCAN_IXR_ARBLST_MASK) {
priv->can.can_stats.arbitration_lost++;
- if (skb) {
- cf->can_id |= CAN_ERR_LOSTARB;
- cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
- }
+ cf.can_id |= CAN_ERR_LOSTARB;
+ cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
}
/* Check for RX FIFO Overflow interrupt */
if (isr & XCAN_IXR_RXOFLW_MASK) {
stats->rx_over_errors++;
stats->rx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
- }
+ cf.can_id |= CAN_ERR_CRTL;
+ cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
}
/* Check for RX Match Not Finished interrupt */
@@ -1029,68 +1026,77 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
stats->rx_dropped++;
stats->rx_errors++;
netdev_err(ndev, "RX match not finished, frame discarded\n");
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_UNSPEC;
- }
+ cf.can_id |= CAN_ERR_CRTL;
+ cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
}
/* Check for error interrupt */
if (isr & XCAN_IXR_ERROR_MASK) {
- if (skb)
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ bool berr_reporting = false;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ berr_reporting = true;
+ cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ }
/* Check for Ack error interrupt */
if (err_status & XCAN_ESR_ACKER_MASK) {
stats->tx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_ACK;
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_ACK;
+ cf.data[3] = CAN_ERR_PROT_LOC_ACK;
}
}
/* Check for Bit error interrupt */
if (err_status & XCAN_ESR_BERR_MASK) {
stats->tx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_BIT;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_PROT;
+ cf.data[2] = CAN_ERR_PROT_BIT;
}
}
/* Check for Stuff error interrupt */
if (err_status & XCAN_ESR_STER_MASK) {
stats->rx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_STUFF;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_PROT;
+ cf.data[2] = CAN_ERR_PROT_STUFF;
}
}
/* Check for Form error interrupt */
if (err_status & XCAN_ESR_FMER_MASK) {
stats->rx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_FORM;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_PROT;
+ cf.data[2] = CAN_ERR_PROT_FORM;
}
}
/* Check for CRC error interrupt */
if (err_status & XCAN_ESR_CRCER_MASK) {
stats->rx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_PROT;
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_PROT;
+ cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
}
}
priv->can.can_stats.bus_error++;
}
- if (skb) {
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
+ if (cf.can_id) {
+ struct can_frame *skb_cf;
+ struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);
+
+ if (skb) {
+ skb_cf->can_id |= cf.can_id;
+ memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
+ stats->rx_packets++;
+ stats->rx_bytes += CAN_ERR_DLC;
+ netif_rx(skb);
+ }
}
netdev_dbg(ndev, "%s: error status register: 0x%x\n",
@@ -1599,7 +1605,6 @@ static const struct xcan_devtype_data xcan_zynq_data = {
static const struct xcan_devtype_data xcan_axi_data = {
.cantype = XAXI_CAN,
- .flags = XCAN_FLAG_TXFEMP,
.bittiming_const = &xcan_bittiming_const,
.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
@@ -1652,7 +1657,6 @@ MODULE_DEVICE_TABLE(of, xcan_of_match);
*/
static int xcan_probe(struct platform_device *pdev)
{
- struct resource *res; /* IO mem resources */
struct net_device *ndev;
struct xcan_priv *priv;
const struct of_device_id *of_id;
@@ -1664,8 +1668,7 @@ static int xcan_probe(struct platform_device *pdev)
const char *hw_tx_max_property;
/* Get the virtual base address for the device */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
goto err;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 685e12b05a7c..c7667645f04a 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -52,6 +52,8 @@ source "drivers/net/dsa/microchip/Kconfig"
source "drivers/net/dsa/mv88e6xxx/Kconfig"
+source "drivers/net/dsa/ocelot/Kconfig"
+
source "drivers/net/dsa/sja1105/Kconfig"
config NET_DSA_QCA8K
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index ae70b79628d6..9d384a32b3a2 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -20,4 +20,5 @@ obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o
obj-y += b53/
obj-y += microchip/
obj-y += mv88e6xxx/
+obj-y += ocelot/
obj-y += sja1105/
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 9add84c79dd6..e43040c9f9ee 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -350,6 +350,18 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
unsigned int timeout = 1000;
u32 reg;
+ int ret;
+
+ /* The watchdog reset does not work on 7278; we need to hit the
+ * "external" reset line through the reset controller.
+ */
+ if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev)) {
+ ret = reset_control_assert(priv->rcdev);
+ if (ret)
+ return ret;
+
+ return reset_control_deassert(priv->rcdev);
+ }
reg = core_readl(priv, CORE_WATCHDOG_CTRL);
reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
@@ -381,8 +393,9 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
struct device_node *dn)
{
struct device_node *port;
- int mode;
unsigned int port_num;
+ phy_interface_t mode;
+ int err;
priv->moca_port = -1;
@@ -395,8 +408,8 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
* has completed, since they might be turned off at that
* time
*/
- mode = of_get_phy_mode(port);
- if (mode < 0)
+ err = of_get_phy_mode(port, &mode);
+ if (err)
continue;
if (mode == PHY_INTERFACE_MODE_INTERNAL)
@@ -1091,6 +1104,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->core_reg_align = data->core_reg_align;
priv->num_cfp_rules = data->num_cfp_rules;
+ priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "switch");
+ if (PTR_ERR(priv->rcdev) == -EPROBE_DEFER)
+ return PTR_ERR(priv->rcdev);
+
/* Auto-detection using standard registers will not work, so
* provide an indication of what kind of device we are for
* b53_common to work with
@@ -1218,11 +1236,13 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
priv->wol_ports_mask = 0;
+ /* Disable interrupts */
+ bcm_sf2_intr_disable(priv);
dsa_unregister_switch(priv->dev->ds);
bcm_sf2_cfp_exit(priv->dev->ds);
- /* Disable all ports and interrupts */
- bcm_sf2_sw_suspend(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
+ if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev))
+ reset_control_assert(priv->rcdev);
return 0;
}
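The reset line is requested with devm_reset_control_get_optional_exclusive(), which returns NULL rather than an error when no such line is described in the device tree; NULL is accepted as a no-op by reset_control_assert()/reset_control_deassert(), which is why the driver above only has to care about -EPROBE_DEFER at probe time. A sketch of the pattern (stricter than the driver in that it propagates every error):

	struct reset_control *rc;
	int err;

	rc = devm_reset_control_get_optional_exclusive(dev, "switch");
	if (IS_ERR(rc))
		return PTR_ERR(rc);	/* notably -EPROBE_DEFER */

	/* rc may be NULL here; assert/deassert then do nothing */
	err = reset_control_assert(rc);
	if (err)
		return err;

	return reset_control_deassert(rc);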
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 1df30ccec42d..de386dd96d66 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
+#include <linux/reset.h>
#include <net/dsa.h>
@@ -64,6 +65,8 @@ struct bcm_sf2_priv {
void __iomem *fcb;
void __iomem *acb;
+ struct reset_control *rcdev;
+
/* Register offsets indirection tables */
u32 type;
const u16 *reg_offsets;
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 5d08e4430824..d8fda4a02640 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -422,6 +422,7 @@ EXPORT_SYMBOL(ksz_switch_alloc);
int ksz_switch_register(struct ksz_device *dev,
const struct ksz_dev_ops *ops)
{
+ phy_interface_t interface;
int ret;
if (dev->pdata)
@@ -456,9 +457,9 @@ int ksz_switch_register(struct ksz_device *dev,
* device tree.
*/
if (dev->dev->of_node) {
- ret = of_get_phy_mode(dev->dev->of_node);
- if (ret >= 0)
- dev->interface = ret;
+ ret = of_get_phy_mode(dev->dev->of_node, &interface);
+ if (ret == 0)
+ dev->interface = interface;
dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
"microchip,synclko-125");
}
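This is part of a tree-wide conversion: of_get_phy_mode() now returns 0 or a negative errno and stores the parsed mode through a phy_interface_t pointer, instead of multiplexing the mode and the error code into one signed return value. The new calling convention, as a sketch:

	phy_interface_t interface;
	int err;

	err = of_get_phy_mode(np, &interface);	/* np: the DT node to parse */
	if (err)
		return err;	/* or fall back to a driver default, as above */

	/* 'interface' is only valid when err == 0 */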
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index add9e4279176..ed1ec10ec62b 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1340,7 +1340,9 @@ mt7530_setup(struct dsa_switch *ds)
if (!dsa_is_unused_port(ds, 5)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
- interface = of_get_phy_mode(dsa_to_port(ds, 5)->dn);
+ ret = of_get_phy_mode(dsa_to_port(ds, 5)->dn, &interface);
+ if (ret && ret != -ENODEV)
+ return ret;
} else {
/* Scan the ethernet nodes. look for GMAC1, lookup used phy */
for_each_child_of_node(dn, mac_np) {
@@ -1354,7 +1356,9 @@ mt7530_setup(struct dsa_switch *ds)
phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
if (phy_node->parent == priv->dev->of_node->parent) {
- interface = of_get_phy_mode(mac_np);
+ ret = of_get_phy_mode(mac_np, &interface);
+ if (ret && ret != -ENODEV)
+ return ret;
id = of_mdio_parse_addr(ds->dev, phy_node);
if (id == 0)
priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 66de492117ad..3bd988529178 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2390,7 +2390,14 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
if (chip->info->ops->set_egress_port) {
err = chip->info->ops->set_egress_port(chip,
- upstream_port);
+ MV88E6XXX_EGRESS_DIR_INGRESS,
+ upstream_port);
+ if (err)
+ return err;
+
+ err = chip->info->ops->set_egress_port(chip,
+ MV88E6XXX_EGRESS_DIR_EGRESS,
+ upstream_port);
if (err)
return err;
}
@@ -2720,9 +2727,179 @@ static void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds)
ARRAY_SIZE(mv88e6xxx_devlink_params));
}
+enum mv88e6xxx_devlink_resource_id {
+ MV88E6XXX_RESOURCE_ID_ATU,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+};
+
+static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
+ u16 bin)
+{
+ u16 occupancy = 0;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_g2_atu_stats_set(chip, MV88E6XXX_G2_ATU_STATS_MODE_ALL,
+ bin);
+ if (err) {
+ dev_err(chip->dev, "failed to set ATU stats kind/bin\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g1_atu_get_next(chip, 0);
+ if (err) {
+ dev_err(chip->dev, "failed to perform ATU get next\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g2_atu_stats_get(chip, &occupancy);
+ if (err) {
+ dev_err(chip->dev, "failed to get ATU stats\n");
+ goto unlock;
+ }
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return occupancy;
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_0_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_0);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_1_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_1);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_2_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_2);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_3_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_3);
+}
+
+static u64 mv88e6xxx_devlink_atu_get(void *priv)
+{
+ return mv88e6xxx_devlink_atu_bin_0_get(priv) +
+ mv88e6xxx_devlink_atu_bin_1_get(priv) +
+ mv88e6xxx_devlink_atu_bin_2_get(priv) +
+ mv88e6xxx_devlink_atu_bin_3_get(priv);
+}
+
+static int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip),
+ mv88e6xxx_num_macs(chip),
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU",
+ mv88e6xxx_num_macs(chip),
+ MV88E6XXX_RESOURCE_ID_ATU,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip) / 4,
+ mv88e6xxx_num_macs(chip) / 4,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_0",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_1",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_2",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_3",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ mv88e6xxx_devlink_atu_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ mv88e6xxx_devlink_atu_bin_0_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ mv88e6xxx_devlink_atu_bin_1_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ mv88e6xxx_devlink_atu_bin_2_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ mv88e6xxx_devlink_atu_bin_3_get,
+ chip);
+
+ return 0;
+
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
+
static void mv88e6xxx_teardown(struct dsa_switch *ds)
{
mv88e6xxx_teardown_devlink_params(ds);
+ dsa_devlink_resources_unregister(ds);
}
static int mv88e6xxx_setup(struct dsa_switch *ds)
@@ -2841,11 +3018,23 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
unlock:
mv88e6xxx_reg_unlock(chip);
- /* Has to be called without holding the register lock, since
- * it takes the devlink lock, and we later take the locks in
- * the reverse order when getting/setting parameters.
+ if (err)
+ return err;
+
+ /* These have to be called without holding the register lock, since
+ * they take the devlink lock, and we later take the locks in
+ * the reverse order when getting/setting parameters or
+ * resource occupancy.
*/
- return mv88e6xxx_setup_devlink_params(ds);
+ err = mv88e6xxx_setup_devlink_resources(ds);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_setup_devlink_params(ds);
+ if (err)
+ dsa_devlink_resources_unregister(ds);
+
+ return err;
}
static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
@@ -4299,6 +4488,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6097,
.name = "Marvell 88E6085",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 10,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4321,6 +4511,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6095,
.name = "Marvell 88E6095/88E6095F",
.num_databases = 256,
+ .num_macs = 8192,
.num_ports = 11,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4341,6 +4532,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6097,
.name = "Marvell 88E6097/88E6097F",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 11,
.num_internal_phys = 8,
.max_vid = 4095,
@@ -4363,6 +4555,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6123",
.num_databases = 4096,
+ .num_macs = 1024,
.num_ports = 3,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4385,6 +4578,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6185,
.name = "Marvell 88E6131",
.num_databases = 256,
+ .num_macs = 8192,
.num_ports = 8,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4405,6 +4599,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6141",
.num_databases = 4096,
+ .num_macs = 2048,
.num_ports = 6,
.num_internal_phys = 5,
.num_gpio = 11,
@@ -4428,6 +4623,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6161",
.num_databases = 4096,
+ .num_macs = 1024,
.num_ports = 6,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4451,6 +4647,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6165",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 6,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4474,6 +4671,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6171",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4496,6 +4694,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6172",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4519,6 +4718,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6175",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4541,6 +4741,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6176",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4564,6 +4765,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6185,
.name = "Marvell 88E6185",
.num_databases = 256,
+ .num_macs = 8192,
.num_ports = 10,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4584,6 +4786,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6190",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4607,6 +4810,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6190X",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4630,6 +4834,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6191",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
@@ -4680,6 +4885,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6240",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4750,6 +4956,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6320,
.name = "Marvell 88E6320",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4774,6 +4981,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6320,
.name = "Marvell 88E6321",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4797,6 +5005,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
.num_databases = 4096,
+ .num_macs = 2048,
.num_internal_phys = 5,
.num_ports = 6,
.num_gpio = 11,
@@ -4821,6 +5030,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6350",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4843,6 +5053,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6351",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4865,6 +5076,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6352",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4888,6 +5100,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6390",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4911,6 +5124,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6390X",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -5036,6 +5250,80 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
return err;
}
+static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ enum mv88e6xxx_egress_direction direction = ingress ?
+ MV88E6XXX_EGRESS_DIR_INGRESS :
+ MV88E6XXX_EGRESS_DIR_EGRESS;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ bool other_mirrors = false;
+ int i;
+ int err;
+
+ if (!chip->info->ops->set_egress_port)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&chip->reg_lock);
+ if ((ingress ? chip->ingress_dest_port : chip->egress_dest_port) !=
+ mirror->to_local_port) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ other_mirrors |= ingress ?
+ chip->ports[i].mirror_ingress :
+ chip->ports[i].mirror_egress;
+
+ /* Can't change egress port when other mirror is active */
+ if (other_mirrors) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = chip->info->ops->set_egress_port(chip,
+ direction,
+ mirror->to_local_port);
+ if (err)
+ goto out;
+ }
+
+ err = mv88e6xxx_port_set_mirror(chip, port, direction, true);
+out:
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static void mv88e6xxx_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ enum mv88e6xxx_egress_direction direction = mirror->ingress ?
+ MV88E6XXX_EGRESS_DIR_INGRESS :
+ MV88E6XXX_EGRESS_DIR_EGRESS;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ bool other_mirrors = false;
+ int i;
+
+ mutex_lock(&chip->reg_lock);
+ if (mv88e6xxx_port_set_mirror(chip, port, direction, false))
+ dev_err(ds->dev, "p%d: failed to disable mirroring\n", port);
+
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ other_mirrors |= mirror->ingress ?
+ chip->ports[i].mirror_ingress :
+ chip->ports[i].mirror_egress;
+
+ /* Reset egress port when no other mirror is active */
+ if (!other_mirrors) {
+ if (chip->info->ops->set_egress_port(chip,
+ direction,
+ dsa_upstream_port(ds,
+ port)))
+ dev_err(ds->dev, "failed to set egress port\n");
+ }
+
+ mutex_unlock(&chip->reg_lock);
+}
+
static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
bool unicast, bool multicast)
{
@@ -5091,6 +5379,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_mdb_prepare = mv88e6xxx_port_mdb_prepare,
.port_mdb_add = mv88e6xxx_port_mdb_add,
.port_mdb_del = mv88e6xxx_port_mdb_del,
+ .port_mirror_add = mv88e6xxx_port_mirror_add,
+ .port_mirror_del = mv88e6xxx_port_mirror_del,
.crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join,
.crosschip_bridge_leave = mv88e6xxx_crosschip_bridge_leave,
.port_hwtstamp_set = mv88e6xxx_port_hwtstamp_set,
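With the resources registered, current ATU occupancy becomes visible from userspace through the generic devlink interface; something like `devlink resource show <devlink-dev>` should list the ATU resource and its four hash-bin children, with occupancy values supplied by the occ_get callbacks registered above (the exact device handle depends on the bus the switch sits on). Note the ordering constraint spelled out in the mv88e6xxx_setup() comment: both resource and parameter registration take the devlink lock, so they must run only after the register lock has been dropped.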
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 52f7726cc099..8a8e38bfb161 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -33,6 +33,11 @@ enum mv88e6xxx_egress_mode {
MV88E6XXX_EGRESS_MODE_ETHERTYPE,
};
+enum mv88e6xxx_egress_direction {
+ MV88E6XXX_EGRESS_DIR_INGRESS,
+ MV88E6XXX_EGRESS_DIR_EGRESS,
+};
+
enum mv88e6xxx_frame_mode {
MV88E6XXX_FRAME_MODE_NORMAL,
MV88E6XXX_FRAME_MODE_DSA,
@@ -94,6 +99,7 @@ struct mv88e6xxx_info {
u16 prod_num;
const char *name;
unsigned int num_databases;
+ unsigned int num_macs;
unsigned int num_ports;
unsigned int num_internal_phys;
unsigned int num_gpio;
@@ -227,6 +233,8 @@ struct mv88e6xxx_port {
u64 vtu_member_violation;
u64 vtu_miss_violation;
u8 cmode;
+ bool mirror_ingress;
+ bool mirror_egress;
unsigned int serdes_irq;
};
@@ -310,6 +318,10 @@ struct mv88e6xxx_chip {
u16 evcap_config;
u16 enable_count;
+ /* Current ingress and egress monitor ports */
+ int egress_dest_port;
+ int ingress_dest_port;
+
/* Per-port timestamping resources. */
struct mv88e6xxx_port_hwtstamp port_hwtstamp[DSA_MAX_PORTS];
@@ -464,7 +476,9 @@ struct mv88e6xxx_ops {
int (*stats_get_stats)(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
- int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port);
+ int (*set_egress_port)(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port);
#define MV88E6XXX_CASCADE_PORT_NONE 0xe
#define MV88E6XXX_CASCADE_PORT_MULTIPLE 0xf
@@ -613,6 +627,11 @@ static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
return chip->info->num_databases;
}
+static inline unsigned int mv88e6xxx_num_macs(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_macs;
+}
+
static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
{
return chip->info->num_ports;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 25ec4c0ac589..120a65d3e3ef 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -263,8 +263,11 @@ int mv88e6250_g1_ieee_pri_map(struct mv88e6xxx_chip *chip)
/* Offset 0x1a: Monitor Control */
/* Offset 0x1a: Monitor & MGMT Control on some devices */
-int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port)
+int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port)
{
+ int *dest_port_chip;
u16 reg;
int err;
@@ -272,13 +275,28 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- reg &= ~(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK |
- MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
+ switch (direction) {
+ case MV88E6XXX_EGRESS_DIR_INGRESS:
+ dest_port_chip = &chip->ingress_dest_port;
+ reg &= ~MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK;
+ reg |= port <<
+ __bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK);
+ break;
+ case MV88E6XXX_EGRESS_DIR_EGRESS:
+ dest_port_chip = &chip->egress_dest_port;
+ reg &= ~MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK;
+ reg |= port <<
+ __bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg);
+ if (!err)
+ *dest_port_chip = port;
- reg |= port << __bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK) |
- port << __bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
-
- return mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg);
+ return err;
}
/* Older generations also call this the ARP destination. It has been
@@ -310,22 +328,32 @@ static int mv88e6390_g1_monitor_write(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g1_write(chip, MV88E6390_G1_MONITOR_MGMT_CTL, reg);
}
-int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port)
{
+ int *dest_port_chip;
u16 ptr;
int err;
- ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST;
- err = mv88e6390_g1_monitor_write(chip, ptr, port);
- if (err)
- return err;
+ switch (direction) {
+ case MV88E6XXX_EGRESS_DIR_INGRESS:
+ dest_port_chip = &chip->ingress_dest_port;
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST;
+ break;
+ case MV88E6XXX_EGRESS_DIR_EGRESS:
+ dest_port_chip = &chip->egress_dest_port;
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST;
+ break;
+ default:
+ return -EINVAL;
+ }
- ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST;
err = mv88e6390_g1_monitor_write(chip, ptr, port);
- if (err)
- return err;
+ if (!err)
+ *dest_port_chip = port;
- return 0;
+ return err;
}
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
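Both setters rely on __bf_shf() from <linux/bitfield.h>, which turns a contiguous mask into its shift at compile time, so the field position is written exactly once, in the mask define. The usual single-field read-modify-write then looks like this sketch (FIELD_MASK and val are placeholders):

	/* ... read reg from the device ... */
	reg &= ~FIELD_MASK;			/* clear only this field */
	reg |= val << __bf_shf(FIELD_MASK);	/* insert the new value */
	/* equivalent: reg = (reg & ~FIELD_MASK) | FIELD_PREP(FIELD_MASK, val); */
	/* ... write reg back ... */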
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 40fc0e13fc45..bc5a6b2bb1e4 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -288,8 +288,12 @@ int mv88e6095_g1_stats_set_histogram(struct mv88e6xxx_chip *chip);
int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_stats_read(struct mv88e6xxx_chip *chip, int stat, u32 *val);
int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip);
-int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port);
-int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port);
+int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port);
+int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port);
int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
@@ -341,5 +345,6 @@ int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid);
#endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index d8a03bbba83c..bdcd25560dd2 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -154,6 +154,11 @@ static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
return mv88e6xxx_g1_atu_op_wait(chip);
}
+int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid)
+{
+ return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB);
+}
+
/* Offset 0x0C: ATU Data Register */
static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index bdbb72fc20ed..87bfe7c8c9cd 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -280,6 +280,19 @@ int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
return err;
}
+/* Offset 0x0E: ATU Statistics */
+
+int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin)
+{
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_ATU_STATS,
+ kind | bin);
+}
+
+int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats)
+{
+ return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_ATU_STATS, stats);
+}
+
/* Offset 0x0F: Priority Override Table */
static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer,
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 42da4bca73e8..1f42ee656816 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -113,7 +113,16 @@
#define MV88E6XXX_G2_SWITCH_MAC_DATA_MASK 0x00ff
/* Offset 0x0E: ATU Stats Register */
-#define MV88E6XXX_G2_ATU_STATS 0x0e
+#define MV88E6XXX_G2_ATU_STATS 0x0e
+#define MV88E6XXX_G2_ATU_STATS_BIN_0 (0x0 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_1 (0x1 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_2 (0x2 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_3 (0x3 << 14)
+#define MV88E6XXX_G2_ATU_STATS_MODE_ALL (0x0 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_ALL_DYNAMIC (0x1 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_FID_ALL (0x2 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_FID_ALL_DYNAMIC (0x3 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MASK 0x0fff
/* Offset 0x0F: Priority Override Table */
#define MV88E6XXX_G2_PRIO_OVERRIDE 0x0f
@@ -353,6 +362,8 @@ extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external);
+int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
+int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
#else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
@@ -515,6 +526,18 @@ static inline int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
return -EOPNOTSUPP;
}
+static inline int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip,
+ u16 kind, u16 bin)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip,
+ u16 *stats)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
#endif /* _MV88E6XXX_GLOBAL2_H */
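In the register layout above, the bin selector sits in bits 15:14, the mode in bits 13:12, and the 12-bit count in bits 11:0; the BIN_*/MODE_* values are pre-shifted so mv88e6xxx_g2_atu_stats_set() can simply OR them together. A sketch of a full occupancy read, following the sequence used in chip.c (error handling and register locking elided; the final masking is an inference from the MASK define):

	u16 stats, occupancy;

	/* Select what to count: all entries that hashed into bin 2 */
	mv88e6xxx_g2_atu_stats_set(chip, MV88E6XXX_G2_ATU_STATS_MODE_ALL,
				   MV88E6XXX_G2_ATU_STATS_BIN_2);

	/* An ATU GetNext operation latches the count... */
	mv88e6xxx_g1_atu_get_next(chip, 0);

	/* ...which is then read back from the low 12 bits */
	mv88e6xxx_g2_atu_stats_get(chip, &stats);
	occupancy = stats & MV88E6XXX_G2_ATU_STATS_MASK;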
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 15ef81654b67..7fe256c5739d 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -1181,6 +1181,43 @@ int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
+int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_egress_direction direction,
+ bool mirror)
+{
+ bool *mirror_port;
+ u16 reg;
+ u16 bit;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
+ if (err)
+ return err;
+
+ switch (direction) {
+ case MV88E6XXX_EGRESS_DIR_INGRESS:
+ bit = MV88E6XXX_PORT_CTL2_INGRESS_MONITOR;
+ mirror_port = &chip->ports[port].mirror_ingress;
+ break;
+ case MV88E6XXX_EGRESS_DIR_EGRESS:
+ bit = MV88E6XXX_PORT_CTL2_EGRESS_MONITOR;
+ mirror_port = &chip->ports[port].mirror_egress;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg &= ~bit;
+ if (mirror)
+ reg |= bit;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
+ if (!err)
+ *mirror_port = mirror;
+
+ return err;
+}
+
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode)
{
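Together with the .port_mirror_add/.port_mirror_del ops added in chip.c, this exposes the hardware mirroring support through the standard tc matchall offload, along the lines of `tc qdisc add dev swp1 clsact` followed by `tc filter add dev swp1 ingress matchall skip_sw action mirred egress mirror dev swp2` (interface names illustrative). The -EBUSY case in the add path reflects a hardware restriction: the chip has a single ingress and a single egress monitor destination, so all mirrors of a given direction must target the same port.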
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 03a480cd71b9..0ec4327c2b42 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -368,6 +368,9 @@ int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
+int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_egress_direction direction,
+ bool mirror);
int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port);
int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index 073cbd0bb91b..d838c174dc0d 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -273,6 +273,19 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
int pin;
int err;
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests to enable time stamping on both edges. */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
pin = ptp_find_pin(chip->ptp_clock, PTP_PF_EXTTS, rq->extts.index);
if (pin < 0)
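The new checks reject unsupported PTP_EXTTS flag combinations; PTP_STRICT_FLAGS is set by the kernel itself when userspace uses the newer PTP_EXTTS_REQUEST2 ioctl, which opts in to full flag validation by the driver. A minimal userspace sketch (device path and function name illustrative):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int request_extts(const char *dev)	/* e.g. "/dev/ptp0" */
	{
		struct ptp_extts_request req;
		int fd, err;

		fd = open(dev, O_RDWR);
		if (fd < 0)
			return -1;

		memset(&req, 0, sizeof(req));
		req.index = 0;
		/* One edge only; enabling both edges is rejected above. */
		req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;

		/* The *2 ioctl makes the kernel add PTP_STRICT_FLAGS. */
		err = ioctl(fd, PTP_EXTTS_REQUEST2, &req);
		close(fd);
		return err;
	}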
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
new file mode 100644
index 000000000000..0031ca814346
--- /dev/null
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_MSCC_FELIX
+ tristate "Ocelot / Felix Ethernet switch support"
+ depends on NET_DSA && PCI
+ select MSCC_OCELOT_SWITCH
+ select NET_DSA_TAG_OCELOT
+ help
+ This driver supports the VSC9959 network switch, which is a member of
+ the Vitesse / Microsemi / Microchip Ocelot family of switching cores.
+ It is embedded as a PCIe function of the NXP LS1028A ENETC integrated
+ endpoint.
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile
new file mode 100644
index 000000000000..37ad403e0b2a
--- /dev/null
+++ b/drivers/net/dsa/ocelot/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o
+
+mscc_felix-objs := \
+ felix.o \
+ felix_vsc9959.o
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
new file mode 100644
index 000000000000..ce3637b504dd
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 NXP Semiconductors
+ */
+#include <uapi/linux/if_bridge.h>
+#include <soc/mscc/ocelot.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <net/dsa.h>
+#include "felix.h"
+
+static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
+ int port)
+{
+ return DSA_TAG_PROTO_OCELOT;
+}
+
+static int felix_set_ageing_time(struct dsa_switch *ds,
+ unsigned int ageing_time)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_set_ageing_time(ocelot, ageing_time);
+
+ return 0;
+}
+
+static void felix_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_adjust_link(ocelot, port, phydev);
+}
+
+static int felix_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_fdb_dump(ocelot, port, cb, data);
+}
+
+static int felix_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct ocelot *ocelot = ds->priv;
+ bool vlan_aware;
+
+ vlan_aware = dsa_port_is_vlan_filtering(dsa_to_port(ds, port));
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, vlan_aware);
+}
+
+static int felix_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_fdb_del(ocelot, port, addr, vid);
+}
+
+static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_bridge_stp_state_set(ocelot, port, state);
+}
+
+static int felix_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_bridge_join(ocelot, port, br);
+}
+
+static void felix_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_bridge_leave(ocelot, port, br);
+}
+
+/* This callback needs to be present */
+static int felix_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ return 0;
+}
+
+static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_vlan_filtering(ocelot, port, enabled);
+
+ return 0;
+}
+
+static void felix_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ocelot *ocelot = ds->priv;
+ u16 vid;
+ int err;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ err = ocelot_vlan_add(ocelot, port, vid,
+ vlan->flags & BRIDGE_VLAN_INFO_PVID,
+ vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ if (err) {
+ dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
+ vid, port, err);
+ return;
+ }
+ }
+}
+
+static int felix_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ocelot *ocelot = ds->priv;
+ u16 vid;
+ int err;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ err = ocelot_vlan_del(ocelot, port, vid);
+ if (err) {
+ dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
+ vid, port, err);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int felix_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_enable(ocelot, port, phy);
+
+ return 0;
+}
+
+static void felix_port_disable(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_disable(ocelot, port);
+}
+
+static void felix_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_get_strings(ocelot, port, stringset, data);
+}
+
+static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_get_ethtool_stats(ocelot, port, data);
+}
+
+static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_get_sset_count(ocelot, port, sset);
+}
+
+static int felix_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_get_ts_info(ocelot, port, info);
+}
+
+static int felix_init_structs(struct felix *felix, int num_phys_ports)
+{
+ struct ocelot *ocelot = &felix->ocelot;
+ resource_size_t base;
+ int port, i, err;
+
+ ocelot->num_phys_ports = num_phys_ports;
+ ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
+ sizeof(struct ocelot_port *), GFP_KERNEL);
+ if (!ocelot->ports)
+ return -ENOMEM;
+
+ ocelot->map = felix->info->map;
+ ocelot->stats_layout = felix->info->stats_layout;
+ ocelot->num_stats = felix->info->num_stats;
+ ocelot->shared_queue_sz = felix->info->shared_queue_sz;
+ ocelot->ops = felix->info->ops;
+
+ base = pci_resource_start(felix->pdev, felix->info->pci_bar);
+
+ for (i = 0; i < TARGET_MAX; i++) {
+ struct regmap *target;
+ struct resource *res;
+
+ if (!felix->info->target_io_res[i].name)
+ continue;
+
+ res = &felix->info->target_io_res[i];
+ res->flags = IORESOURCE_MEM;
+ res->start += base;
+ res->end += base;
+
+ target = ocelot_regmap_init(ocelot, res);
+ if (IS_ERR(target)) {
+ dev_err(ocelot->dev,
+ "Failed to map device memory space\n");
+ return PTR_ERR(target);
+ }
+
+ ocelot->targets[i] = target;
+ }
+
+ err = ocelot_regfields_init(ocelot, felix->info->regfields);
+ if (err) {
+ dev_err(ocelot->dev, "failed to init reg fields map\n");
+ return err;
+ }
+
+ for (port = 0; port < num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port;
+ void __iomem *port_regs;
+ struct resource *res;
+
+ ocelot_port = devm_kzalloc(ocelot->dev,
+ sizeof(struct ocelot_port),
+ GFP_KERNEL);
+ if (!ocelot_port) {
+ dev_err(ocelot->dev,
+ "failed to allocate port memory\n");
+ return -ENOMEM;
+ }
+
+ res = &felix->info->port_io_res[port];
+ res->flags = IORESOURCE_MEM;
+ res->start += base;
+ res->end += base;
+
+ port_regs = devm_ioremap_resource(ocelot->dev, res);
+ if (IS_ERR(port_regs)) {
+ dev_err(ocelot->dev,
+ "failed to map registers for port %d\n", port);
+ return PTR_ERR(port_regs);
+ }
+
+ ocelot_port->ocelot = ocelot;
+ ocelot_port->regs = port_regs;
+ ocelot->ports[port] = ocelot_port;
+ }
+
+ return 0;
+}
+
+/* Hardware initialization done here so that we can allocate structures with
+ * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
+ * us to allocate structures twice (leak memory) and map PCI memory twice
+ * (which will not work).
+ */
+static int felix_setup(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int port, err;
+
+ err = felix_init_structs(felix, ds->num_ports);
+ if (err)
+ return err;
+
+ ocelot_init(ocelot);
+
+ for (port = 0; port < ds->num_ports; port++) {
+ ocelot_init_port(ocelot, port);
+
+ if (dsa_is_cpu_port(ds, port))
+ ocelot_set_cpu_port(ocelot, port,
+ OCELOT_TAG_PREFIX_NONE,
+ OCELOT_TAG_PREFIX_LONG);
+ }
+
+ return 0;
+}
+
+static void felix_teardown(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ /* stop workqueue thread */
+ ocelot_deinit(ocelot);
+}
+
+static const struct dsa_switch_ops felix_switch_ops = {
+ .get_tag_protocol = felix_get_tag_protocol,
+ .setup = felix_setup,
+ .teardown = felix_teardown,
+ .set_ageing_time = felix_set_ageing_time,
+ .get_strings = felix_get_strings,
+ .get_ethtool_stats = felix_get_ethtool_stats,
+ .get_sset_count = felix_get_sset_count,
+ .get_ts_info = felix_get_ts_info,
+ .adjust_link = felix_adjust_link,
+ .port_enable = felix_port_enable,
+ .port_disable = felix_port_disable,
+ .port_fdb_dump = felix_fdb_dump,
+ .port_fdb_add = felix_fdb_add,
+ .port_fdb_del = felix_fdb_del,
+ .port_bridge_join = felix_bridge_join,
+ .port_bridge_leave = felix_bridge_leave,
+ .port_stp_state_set = felix_bridge_stp_state_set,
+ .port_vlan_prepare = felix_vlan_prepare,
+ .port_vlan_filtering = felix_vlan_filtering,
+ .port_vlan_add = felix_vlan_add,
+ .port_vlan_del = felix_vlan_del,
+};
+
+static struct felix_info *felix_instance_tbl[] = {
+ [FELIX_INSTANCE_VSC9959] = &felix_info_vsc9959,
+};
+
+static int felix_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ enum felix_instance instance = id->driver_data;
+ struct dsa_switch *ds;
+ struct ocelot *ocelot;
+ struct felix *felix;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "device enable failed\n");
+ goto err_pci_enable;
+ }
+
+ /* set up for high or low dma */
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
+ }
+
+ felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
+ if (!felix) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate driver memory\n");
+ goto err_alloc_felix;
+ }
+
+ pci_set_drvdata(pdev, felix);
+ ocelot = &felix->ocelot;
+ ocelot->dev = &pdev->dev;
+ felix->pdev = pdev;
+ felix->info = felix_instance_tbl[instance];
+
+ pci_set_master(pdev);
+
+ ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
+ if (!ds) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
+ goto err_alloc_ds;
+ }
+
+ ds->dev = &pdev->dev;
+ ds->num_ports = felix->info->num_ports;
+ ds->ops = &felix_switch_ops;
+ ds->priv = ocelot;
+ felix->ds = ds;
+
+ err = dsa_register_switch(ds);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+ goto err_register_ds;
+ }
+
+ return 0;
+
+err_register_ds:
+ kfree(ds);
+err_alloc_ds:
+err_alloc_felix:
+ kfree(felix);
+err_dma:
+ pci_disable_device(pdev);
+err_pci_enable:
+ return err;
+}
+
+static void felix_pci_remove(struct pci_dev *pdev)
+{
+ struct felix *felix;
+
+ felix = pci_get_drvdata(pdev);
+
+ dsa_unregister_switch(felix->ds);
+
+ kfree(felix->ds);
+ kfree(felix);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_device_id felix_ids[] = {
+ {
+ /* NXP LS1028A */
+ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0xEEF0),
+ .driver_data = FELIX_INSTANCE_VSC9959,
+ },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, felix_ids);
+
+static struct pci_driver felix_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = felix_ids,
+ .probe = felix_pci_probe,
+ .remove = felix_pci_remove,
+};
+
+module_pci_driver(felix_pci_driver);
+
+MODULE_DESCRIPTION("Felix Switch driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
new file mode 100644
index 000000000000..204296e51d0c
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 NXP Semiconductors
+ */
+#ifndef _MSCC_FELIX_H
+#define _MSCC_FELIX_H
+
+#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
+
+/* Platform-specific information */
+struct felix_info {
+ struct resource *target_io_res;
+ struct resource *port_io_res;
+ const struct reg_field *regfields;
+ const u32 *const *map;
+ const struct ocelot_ops *ops;
+ int shared_queue_sz;
+ const struct ocelot_stat_layout *stats_layout;
+ unsigned int num_stats;
+ int num_ports;
+ int pci_bar;
+};
+
+extern struct felix_info felix_info_vsc9959;
+
+enum felix_instance {
+ FELIX_INSTANCE_VSC9959 = 0,
+};
+
+/* DSA glue / front-end for struct ocelot */
+struct felix {
+ struct dsa_switch *ds;
+ struct pci_dev *pdev;
+ struct felix_info *info;
+ struct ocelot ocelot;
+};
+
+#endif
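felix.h's ocelot_to_felix() is the usual container_of() trick: struct ocelot is embedded by value in struct felix, so the DSA front end can store only the ocelot pointer in ds->priv and still reach the PCI-specific state. A sketch of how a callback recovers it (the callback itself is hypothetical):

	static int felix_example_cb(struct dsa_switch *ds)
	{
		struct ocelot *ocelot = ds->priv;		/* set in probe */
		struct felix *felix = ocelot_to_felix(ocelot);	/* container_of */

		/* felix->pdev, felix->info etc. are now reachable */
		return felix->info->num_ports;
	}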
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
new file mode 100644
index 000000000000..d67bd14a48e0
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright 2017 Microsemi Corporation
+ * Copyright 2018-2019 NXP Semiconductors
+ */
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include "felix.h"
+
+static const u32 vsc9959_ana_regmap[] = {
+ REG(ANA_ADVLEARN, 0x0089a0),
+ REG(ANA_VLANMASK, 0x0089a4),
+ REG_RESERVED(ANA_PORT_B_DOMAIN),
+ REG(ANA_ANAGEFIL, 0x0089ac),
+ REG(ANA_ANEVENTS, 0x0089b0),
+ REG(ANA_STORMLIMIT_BURST, 0x0089b4),
+ REG(ANA_STORMLIMIT_CFG, 0x0089b8),
+ REG(ANA_ISOLATED_PORTS, 0x0089c8),
+ REG(ANA_COMMUNITY_PORTS, 0x0089cc),
+ REG(ANA_AUTOAGE, 0x0089d0),
+ REG(ANA_MACTOPTIONS, 0x0089d4),
+ REG(ANA_LEARNDISC, 0x0089d8),
+ REG(ANA_AGENCTRL, 0x0089dc),
+ REG(ANA_MIRRORPORTS, 0x0089e0),
+ REG(ANA_EMIRRORPORTS, 0x0089e4),
+ REG(ANA_FLOODING, 0x0089e8),
+ REG(ANA_FLOODING_IPMC, 0x008a08),
+ REG(ANA_SFLOW_CFG, 0x008a0c),
+ REG(ANA_PORT_MODE, 0x008a28),
+ REG(ANA_CUT_THRU_CFG, 0x008a48),
+ REG(ANA_PGID_PGID, 0x008400),
+ REG(ANA_TABLES_ANMOVED, 0x007f1c),
+ REG(ANA_TABLES_MACHDATA, 0x007f20),
+ REG(ANA_TABLES_MACLDATA, 0x007f24),
+ REG(ANA_TABLES_STREAMDATA, 0x007f28),
+ REG(ANA_TABLES_MACACCESS, 0x007f2c),
+ REG(ANA_TABLES_MACTINDX, 0x007f30),
+ REG(ANA_TABLES_VLANACCESS, 0x007f34),
+ REG(ANA_TABLES_VLANTIDX, 0x007f38),
+ REG(ANA_TABLES_ISDXACCESS, 0x007f3c),
+ REG(ANA_TABLES_ISDXTIDX, 0x007f40),
+ REG(ANA_TABLES_ENTRYLIM, 0x007f00),
+ REG(ANA_TABLES_PTP_ID_HIGH, 0x007f44),
+ REG(ANA_TABLES_PTP_ID_LOW, 0x007f48),
+ REG(ANA_TABLES_STREAMACCESS, 0x007f4c),
+ REG(ANA_TABLES_STREAMTIDX, 0x007f50),
+ REG(ANA_TABLES_SEQ_HISTORY, 0x007f54),
+ REG(ANA_TABLES_SEQ_MASK, 0x007f58),
+ REG(ANA_TABLES_SFID_MASK, 0x007f5c),
+ REG(ANA_TABLES_SFIDACCESS, 0x007f60),
+ REG(ANA_TABLES_SFIDTIDX, 0x007f64),
+ REG(ANA_MSTI_STATE, 0x008600),
+ REG(ANA_OAM_UPM_LM_CNT, 0x008000),
+ REG(ANA_SG_ACCESS_CTRL, 0x008a64),
+ REG(ANA_SG_CONFIG_REG_1, 0x007fb0),
+ REG(ANA_SG_CONFIG_REG_2, 0x007fb4),
+ REG(ANA_SG_CONFIG_REG_3, 0x007fb8),
+ REG(ANA_SG_CONFIG_REG_4, 0x007fbc),
+ REG(ANA_SG_CONFIG_REG_5, 0x007fc0),
+ REG(ANA_SG_GCL_GS_CONFIG, 0x007f80),
+ REG(ANA_SG_GCL_TI_CONFIG, 0x007f90),
+ REG(ANA_SG_STATUS_REG_1, 0x008980),
+ REG(ANA_SG_STATUS_REG_2, 0x008984),
+ REG(ANA_SG_STATUS_REG_3, 0x008988),
+ REG(ANA_PORT_VLAN_CFG, 0x007800),
+ REG(ANA_PORT_DROP_CFG, 0x007804),
+ REG(ANA_PORT_QOS_CFG, 0x007808),
+ REG(ANA_PORT_VCAP_CFG, 0x00780c),
+ REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007810),
+ REG(ANA_PORT_VCAP_S2_CFG, 0x00781c),
+ REG(ANA_PORT_PCP_DEI_MAP, 0x007820),
+ REG(ANA_PORT_CPU_FWD_CFG, 0x007860),
+ REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007864),
+ REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007868),
+ REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00786c),
+ REG(ANA_PORT_PORT_CFG, 0x007870),
+ REG(ANA_PORT_POL_CFG, 0x007874),
+ REG(ANA_PORT_PTP_CFG, 0x007878),
+ REG(ANA_PORT_PTP_DLY1_CFG, 0x00787c),
+ REG(ANA_PORT_PTP_DLY2_CFG, 0x007880),
+ REG(ANA_PORT_SFID_CFG, 0x007884),
+ REG(ANA_PFC_PFC_CFG, 0x008800),
+ REG_RESERVED(ANA_PFC_PFC_TIMER),
+ REG_RESERVED(ANA_IPT_OAM_MEP_CFG),
+ REG_RESERVED(ANA_IPT_IPT),
+ REG_RESERVED(ANA_PPT_PPT),
+ REG_RESERVED(ANA_FID_MAP_FID_MAP),
+ REG(ANA_AGGR_CFG, 0x008a68),
+ REG(ANA_CPUQ_CFG, 0x008a6c),
+ REG_RESERVED(ANA_CPUQ_CFG2),
+ REG(ANA_CPUQ_8021_CFG, 0x008a74),
+ REG(ANA_DSCP_CFG, 0x008ab4),
+ REG(ANA_DSCP_REWR_CFG, 0x008bb4),
+ REG(ANA_VCAP_RNG_TYPE_CFG, 0x008bf4),
+ REG(ANA_VCAP_RNG_VAL_CFG, 0x008c14),
+ REG_RESERVED(ANA_VRAP_CFG),
+ REG_RESERVED(ANA_VRAP_HDR_DATA),
+ REG_RESERVED(ANA_VRAP_HDR_MASK),
+ REG(ANA_DISCARD_CFG, 0x008c40),
+ REG(ANA_FID_CFG, 0x008c44),
+ REG(ANA_POL_PIR_CFG, 0x004000),
+ REG(ANA_POL_CIR_CFG, 0x004004),
+ REG(ANA_POL_MODE_CFG, 0x004008),
+ REG(ANA_POL_PIR_STATE, 0x00400c),
+ REG(ANA_POL_CIR_STATE, 0x004010),
+ REG_RESERVED(ANA_POL_STATE),
+ REG(ANA_POL_FLOWC, 0x008c48),
+ REG(ANA_POL_HYST, 0x008cb4),
+ REG_RESERVED(ANA_POL_MISC_CFG),
+};
+
+static const u32 vsc9959_qs_regmap[] = {
+ REG(QS_XTR_GRP_CFG, 0x000000),
+ REG(QS_XTR_RD, 0x000008),
+ REG(QS_XTR_FRM_PRUNING, 0x000010),
+ REG(QS_XTR_FLUSH, 0x000018),
+ REG(QS_XTR_DATA_PRESENT, 0x00001c),
+ REG(QS_XTR_CFG, 0x000020),
+ REG(QS_INJ_GRP_CFG, 0x000024),
+ REG(QS_INJ_WR, 0x00002c),
+ REG(QS_INJ_CTRL, 0x000034),
+ REG(QS_INJ_STATUS, 0x00003c),
+ REG(QS_INJ_ERR, 0x000040),
+ REG_RESERVED(QS_INH_DBG),
+};
+
+static const u32 vsc9959_s2_regmap[] = {
+ REG(S2_CORE_UPDATE_CTRL, 0x000000),
+ REG(S2_CORE_MV_CFG, 0x000004),
+ REG(S2_CACHE_ENTRY_DAT, 0x000008),
+ REG(S2_CACHE_MASK_DAT, 0x000108),
+ REG(S2_CACHE_ACTION_DAT, 0x000208),
+ REG(S2_CACHE_CNT_DAT, 0x000308),
+ REG(S2_CACHE_TG_DAT, 0x000388),
+};
+
+static const u32 vsc9959_qsys_regmap[] = {
+ REG(QSYS_PORT_MODE, 0x00f460),
+ REG(QSYS_SWITCH_PORT_MODE, 0x00f480),
+ REG(QSYS_STAT_CNT_CFG, 0x00f49c),
+ REG(QSYS_EEE_CFG, 0x00f4a0),
+ REG(QSYS_EEE_THRES, 0x00f4b8),
+ REG(QSYS_IGR_NO_SHARING, 0x00f4bc),
+ REG(QSYS_EGR_NO_SHARING, 0x00f4c0),
+ REG(QSYS_SW_STATUS, 0x00f4c4),
+ REG(QSYS_EXT_CPU_CFG, 0x00f4e0),
+ REG_RESERVED(QSYS_PAD_CFG),
+ REG(QSYS_CPU_GROUP_MAP, 0x00f4e8),
+ REG_RESERVED(QSYS_QMAP),
+ REG_RESERVED(QSYS_ISDX_SGRP),
+ REG_RESERVED(QSYS_TIMED_FRAME_ENTRY),
+ REG(QSYS_TFRM_MISC, 0x00f50c),
+ REG(QSYS_TFRM_PORT_DLY, 0x00f510),
+ REG(QSYS_TFRM_TIMER_CFG_1, 0x00f514),
+ REG(QSYS_TFRM_TIMER_CFG_2, 0x00f518),
+ REG(QSYS_TFRM_TIMER_CFG_3, 0x00f51c),
+ REG(QSYS_TFRM_TIMER_CFG_4, 0x00f520),
+ REG(QSYS_TFRM_TIMER_CFG_5, 0x00f524),
+ REG(QSYS_TFRM_TIMER_CFG_6, 0x00f528),
+ REG(QSYS_TFRM_TIMER_CFG_7, 0x00f52c),
+ REG(QSYS_TFRM_TIMER_CFG_8, 0x00f530),
+ REG(QSYS_RED_PROFILE, 0x00f534),
+ REG(QSYS_RES_QOS_MODE, 0x00f574),
+ REG(QSYS_RES_CFG, 0x00c000),
+ REG(QSYS_RES_STAT, 0x00c004),
+ REG(QSYS_EGR_DROP_MODE, 0x00f578),
+ REG(QSYS_EQ_CTRL, 0x00f57c),
+ REG_RESERVED(QSYS_EVENTS_CORE),
+ REG(QSYS_QMAXSDU_CFG_0, 0x00f584),
+ REG(QSYS_QMAXSDU_CFG_1, 0x00f5a0),
+ REG(QSYS_QMAXSDU_CFG_2, 0x00f5bc),
+ REG(QSYS_QMAXSDU_CFG_3, 0x00f5d8),
+ REG(QSYS_QMAXSDU_CFG_4, 0x00f5f4),
+ REG(QSYS_QMAXSDU_CFG_5, 0x00f610),
+ REG(QSYS_QMAXSDU_CFG_6, 0x00f62c),
+ REG(QSYS_QMAXSDU_CFG_7, 0x00f648),
+ REG(QSYS_PREEMPTION_CFG, 0x00f664),
+ REG_RESERVED(QSYS_CIR_CFG),
+ REG(QSYS_EIR_CFG, 0x000004),
+ REG(QSYS_SE_CFG, 0x000008),
+ REG(QSYS_SE_DWRR_CFG, 0x00000c),
+ REG_RESERVED(QSYS_SE_CONNECT),
+ REG(QSYS_SE_DLB_SENSE, 0x000040),
+ REG(QSYS_CIR_STATE, 0x000044),
+ REG(QSYS_EIR_STATE, 0x000048),
+ REG_RESERVED(QSYS_SE_STATE),
+ REG(QSYS_HSCH_MISC_CFG, 0x00f67c),
+ REG(QSYS_TAG_CONFIG, 0x00f680),
+ REG(QSYS_TAS_PARAM_CFG_CTRL, 0x00f698),
+ REG(QSYS_PORT_MAX_SDU, 0x00f69c),
+ REG(QSYS_PARAM_CFG_REG_1, 0x00f440),
+ REG(QSYS_PARAM_CFG_REG_2, 0x00f444),
+ REG(QSYS_PARAM_CFG_REG_3, 0x00f448),
+ REG(QSYS_PARAM_CFG_REG_4, 0x00f44c),
+ REG(QSYS_PARAM_CFG_REG_5, 0x00f450),
+ REG(QSYS_GCL_CFG_REG_1, 0x00f454),
+ REG(QSYS_GCL_CFG_REG_2, 0x00f458),
+ REG(QSYS_PARAM_STATUS_REG_1, 0x00f400),
+ REG(QSYS_PARAM_STATUS_REG_2, 0x00f404),
+ REG(QSYS_PARAM_STATUS_REG_3, 0x00f408),
+ REG(QSYS_PARAM_STATUS_REG_4, 0x00f40c),
+ REG(QSYS_PARAM_STATUS_REG_5, 0x00f410),
+ REG(QSYS_PARAM_STATUS_REG_6, 0x00f414),
+ REG(QSYS_PARAM_STATUS_REG_7, 0x00f418),
+ REG(QSYS_PARAM_STATUS_REG_8, 0x00f41c),
+ REG(QSYS_PARAM_STATUS_REG_9, 0x00f420),
+ REG(QSYS_GCL_STATUS_REG_1, 0x00f424),
+ REG(QSYS_GCL_STATUS_REG_2, 0x00f428),
+};
+
+static const u32 vsc9959_rew_regmap[] = {
+ REG(REW_PORT_VLAN_CFG, 0x000000),
+ REG(REW_TAG_CFG, 0x000004),
+ REG(REW_PORT_CFG, 0x000008),
+ REG(REW_DSCP_CFG, 0x00000c),
+ REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
+ REG(REW_PTP_CFG, 0x000050),
+ REG(REW_PTP_DLY1_CFG, 0x000054),
+ REG(REW_RED_TAG_CFG, 0x000058),
+ REG(REW_DSCP_REMAP_DP1_CFG, 0x000410),
+ REG(REW_DSCP_REMAP_CFG, 0x000510),
+ REG_RESERVED(REW_STAT_CFG),
+ REG_RESERVED(REW_REW_STICKY),
+ REG_RESERVED(REW_PPT),
+};
+
+static const u32 vsc9959_sys_regmap[] = {
+ REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_SHORTS, 0x000010),
+ REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
+ REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_64, 0x000024),
+ REG(SYS_COUNT_RX_65_127, 0x000028),
+ REG(SYS_COUNT_RX_128_255, 0x00002c),
+ REG(SYS_COUNT_RX_256_1023, 0x000030),
+ REG(SYS_COUNT_RX_1024_1526, 0x000034),
+ REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_TX_OCTETS, 0x000200),
+ REG(SYS_COUNT_TX_COLLISION, 0x000210),
+ REG(SYS_COUNT_TX_DROPS, 0x000214),
+ REG(SYS_COUNT_TX_64, 0x00021c),
+ REG(SYS_COUNT_TX_65_127, 0x000220),
+ REG(SYS_COUNT_TX_128_511, 0x000224),
+ REG(SYS_COUNT_TX_512_1023, 0x000228),
+ REG(SYS_COUNT_TX_1024_1526, 0x00022c),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000230),
+ REG(SYS_COUNT_TX_AGING, 0x000278),
+ REG(SYS_RESET_CFG, 0x000e00),
+ REG(SYS_SR_ETYPE_CFG, 0x000e04),
+ REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
+ REG(SYS_PORT_MODE, 0x000e0c),
+ REG(SYS_FRONT_PORT_MODE, 0x000e2c),
+ REG(SYS_FRM_AGING, 0x000e44),
+ REG(SYS_STAT_CFG, 0x000e48),
+ REG(SYS_SW_STATUS, 0x000e4c),
+ REG_RESERVED(SYS_MISC_CFG),
+ REG(SYS_REW_MAC_HIGH_CFG, 0x000e6c),
+ REG(SYS_REW_MAC_LOW_CFG, 0x000e84),
+ REG(SYS_TIMESTAMP_OFFSET, 0x000e9c),
+ REG(SYS_PAUSE_CFG, 0x000ea0),
+ REG(SYS_PAUSE_TOT_CFG, 0x000ebc),
+ REG(SYS_ATOP, 0x000ec0),
+ REG(SYS_ATOP_TOT_CFG, 0x000edc),
+ REG(SYS_MAC_FC_CFG, 0x000ee0),
+ REG(SYS_MMGT, 0x000ef8),
+ REG_RESERVED(SYS_MMGT_FAST),
+ REG_RESERVED(SYS_EVENTS_DIF),
+ REG_RESERVED(SYS_EVENTS_CORE),
+ REG_RESERVED(SYS_CNT),
+ REG(SYS_PTP_STATUS, 0x000f14),
+ REG(SYS_PTP_TXSTAMP, 0x000f18),
+ REG(SYS_PTP_NXT, 0x000f1c),
+ REG(SYS_PTP_CFG, 0x000f20),
+ REG(SYS_RAM_INIT, 0x000f24),
+ REG_RESERVED(SYS_CM_ADDR),
+ REG_RESERVED(SYS_CM_DATA_WR),
+ REG_RESERVED(SYS_CM_DATA_RD),
+ REG_RESERVED(SYS_CM_OP),
+ REG_RESERVED(SYS_CM_DATA),
+};
+
+static const u32 vsc9959_gcb_regmap[] = {
+ REG(GCB_SOFT_RST, 0x000004),
+};
+
+static const u32 *vsc9959_regmap[] = {
+ [ANA] = vsc9959_ana_regmap,
+ [QS] = vsc9959_qs_regmap,
+ [QSYS] = vsc9959_qsys_regmap,
+ [REW] = vsc9959_rew_regmap,
+ [SYS] = vsc9959_sys_regmap,
+ [S2] = vsc9959_s2_regmap,
+ [GCB] = vsc9959_gcb_regmap,
+};
+
+/* Addresses are relative to the PCI device's base address and
+ * will be fixed up at ioremap time.
+ */
+static struct resource vsc9959_target_io_res[] = {
+ [ANA] = {
+ .start = 0x0280000,
+ .end = 0x028ffff,
+ .name = "ana",
+ },
+ [QS] = {
+ .start = 0x0080000,
+ .end = 0x00800ff,
+ .name = "qs",
+ },
+ [QSYS] = {
+ .start = 0x0200000,
+ .end = 0x021ffff,
+ .name = "qsys",
+ },
+ [REW] = {
+ .start = 0x0030000,
+ .end = 0x003ffff,
+ .name = "rew",
+ },
+ [SYS] = {
+ .start = 0x0010000,
+ .end = 0x001ffff,
+ .name = "sys",
+ },
+ [S2] = {
+ .start = 0x0060000,
+ .end = 0x00603ff,
+ .name = "s2",
+ },
+ [GCB] = {
+ .start = 0x0070000,
+ .end = 0x00701ff,
+ .name = "devcpu_gcb",
+ },
+};
+
+static struct resource vsc9959_port_io_res[] = {
+ {
+ .start = 0x0100000,
+ .end = 0x010ffff,
+ .name = "port0",
+ },
+ {
+ .start = 0x0110000,
+ .end = 0x011ffff,
+ .name = "port1",
+ },
+ {
+ .start = 0x0120000,
+ .end = 0x012ffff,
+ .name = "port2",
+ },
+ {
+ .start = 0x0130000,
+ .end = 0x013ffff,
+ .name = "port3",
+ },
+ {
+ .start = 0x0140000,
+ .end = 0x014ffff,
+ .name = "port4",
+ },
+ {
+ .start = 0x0150000,
+ .end = 0x015ffff,
+ .name = "port5",
+ },
+};
+
+static const struct reg_field vsc9959_regfields[] = {
+ [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
+ [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 5),
+ [ANA_ANEVENTS_FLOOD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 30, 30),
+ [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 26, 26),
+ [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 24, 24),
+ [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 23, 23),
+ [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 22, 22),
+ [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 21, 21),
+ [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 20, 20),
+ [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 19, 19),
+ [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
+ [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 17, 17),
+ [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 15, 15),
+ [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 14, 14),
+ [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 13, 13),
+ [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 12, 12),
+ [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
+ [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
+ [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 9, 9),
+ [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 8, 8),
+ [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 7, 7),
+ [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
+ [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
+ [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 4, 4),
+ [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 3, 3),
+ [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 2, 2),
+ [ANA_ANEVENTS_SEQ_GEN_ERR_0] = REG_FIELD(ANA_ANEVENTS, 1, 1),
+ [ANA_ANEVENTS_SEQ_GEN_ERR_1] = REG_FIELD(ANA_ANEVENTS, 0, 0),
+ [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 16, 16),
+ [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 11, 12),
+ [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 10),
+ [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 0, 0),
+ [GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0),
+};
+
+static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
+ { .offset = 0x00, .name = "rx_octets", },
+ { .offset = 0x01, .name = "rx_unicast", },
+ { .offset = 0x02, .name = "rx_multicast", },
+ { .offset = 0x03, .name = "rx_broadcast", },
+ { .offset = 0x04, .name = "rx_shorts", },
+ { .offset = 0x05, .name = "rx_fragments", },
+ { .offset = 0x06, .name = "rx_jabbers", },
+ { .offset = 0x07, .name = "rx_crc_align_errs", },
+ { .offset = 0x08, .name = "rx_sym_errs", },
+ { .offset = 0x09, .name = "rx_frames_below_65_octets", },
+ { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
+ { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
+ { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
+ { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
+ { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
+ { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
+ { .offset = 0x10, .name = "rx_pause", },
+ { .offset = 0x11, .name = "rx_control", },
+ { .offset = 0x12, .name = "rx_longs", },
+ { .offset = 0x13, .name = "rx_classified_drops", },
+ { .offset = 0x14, .name = "rx_red_prio_0", },
+ { .offset = 0x15, .name = "rx_red_prio_1", },
+ { .offset = 0x16, .name = "rx_red_prio_2", },
+ { .offset = 0x17, .name = "rx_red_prio_3", },
+ { .offset = 0x18, .name = "rx_red_prio_4", },
+ { .offset = 0x19, .name = "rx_red_prio_5", },
+ { .offset = 0x1A, .name = "rx_red_prio_6", },
+ { .offset = 0x1B, .name = "rx_red_prio_7", },
+ { .offset = 0x1C, .name = "rx_yellow_prio_0", },
+ { .offset = 0x1D, .name = "rx_yellow_prio_1", },
+ { .offset = 0x1E, .name = "rx_yellow_prio_2", },
+ { .offset = 0x1F, .name = "rx_yellow_prio_3", },
+ { .offset = 0x20, .name = "rx_yellow_prio_4", },
+ { .offset = 0x21, .name = "rx_yellow_prio_5", },
+ { .offset = 0x22, .name = "rx_yellow_prio_6", },
+ { .offset = 0x23, .name = "rx_yellow_prio_7", },
+ { .offset = 0x24, .name = "rx_green_prio_0", },
+ { .offset = 0x25, .name = "rx_green_prio_1", },
+ { .offset = 0x26, .name = "rx_green_prio_2", },
+ { .offset = 0x27, .name = "rx_green_prio_3", },
+ { .offset = 0x28, .name = "rx_green_prio_4", },
+ { .offset = 0x29, .name = "rx_green_prio_5", },
+ { .offset = 0x2A, .name = "rx_green_prio_6", },
+ { .offset = 0x2B, .name = "rx_green_prio_7", },
+ { .offset = 0x80, .name = "tx_octets", },
+ { .offset = 0x81, .name = "tx_unicast", },
+ { .offset = 0x82, .name = "tx_multicast", },
+ { .offset = 0x83, .name = "tx_broadcast", },
+ { .offset = 0x84, .name = "tx_collision", },
+ { .offset = 0x85, .name = "tx_drops", },
+ { .offset = 0x86, .name = "tx_pause", },
+ { .offset = 0x87, .name = "tx_frames_below_65_octets", },
+ { .offset = 0x88, .name = "tx_frames_65_to_127_octets", },
+ { .offset = 0x89, .name = "tx_frames_128_255_octets", },
+ { .offset = 0x8B, .name = "tx_frames_256_511_octets", },
+ { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", },
+ { .offset = 0x8D, .name = "tx_frames_over_1526_octets", },
+ { .offset = 0x8E, .name = "tx_yellow_prio_0", },
+ { .offset = 0x8F, .name = "tx_yellow_prio_1", },
+ { .offset = 0x90, .name = "tx_yellow_prio_2", },
+ { .offset = 0x91, .name = "tx_yellow_prio_3", },
+ { .offset = 0x92, .name = "tx_yellow_prio_4", },
+ { .offset = 0x93, .name = "tx_yellow_prio_5", },
+ { .offset = 0x94, .name = "tx_yellow_prio_6", },
+ { .offset = 0x95, .name = "tx_yellow_prio_7", },
+ { .offset = 0x96, .name = "tx_green_prio_0", },
+ { .offset = 0x97, .name = "tx_green_prio_1", },
+ { .offset = 0x98, .name = "tx_green_prio_2", },
+ { .offset = 0x99, .name = "tx_green_prio_3", },
+ { .offset = 0x9A, .name = "tx_green_prio_4", },
+ { .offset = 0x9B, .name = "tx_green_prio_5", },
+ { .offset = 0x9C, .name = "tx_green_prio_6", },
+ { .offset = 0x9D, .name = "tx_green_prio_7", },
+ { .offset = 0x9E, .name = "tx_aged", },
+ { .offset = 0x100, .name = "drop_local", },
+ { .offset = 0x101, .name = "drop_tail", },
+ { .offset = 0x102, .name = "drop_yellow_prio_0", },
+ { .offset = 0x103, .name = "drop_yellow_prio_1", },
+ { .offset = 0x104, .name = "drop_yellow_prio_2", },
+ { .offset = 0x105, .name = "drop_yellow_prio_3", },
+ { .offset = 0x106, .name = "drop_yellow_prio_4", },
+ { .offset = 0x107, .name = "drop_yellow_prio_5", },
+ { .offset = 0x108, .name = "drop_yellow_prio_6", },
+ { .offset = 0x109, .name = "drop_yellow_prio_7", },
+ { .offset = 0x10A, .name = "drop_green_prio_0", },
+ { .offset = 0x10B, .name = "drop_green_prio_1", },
+ { .offset = 0x10C, .name = "drop_green_prio_2", },
+ { .offset = 0x10D, .name = "drop_green_prio_3", },
+ { .offset = 0x10E, .name = "drop_green_prio_4", },
+ { .offset = 0x10F, .name = "drop_green_prio_5", },
+ { .offset = 0x110, .name = "drop_green_prio_6", },
+ { .offset = 0x111, .name = "drop_green_prio_7", },
+};
+
+#define VSC9959_INIT_TIMEOUT 50000
+#define VSC9959_GCB_RST_SLEEP 100
+#define VSC9959_SYS_RAMINIT_SLEEP 80
+
+static int vsc9959_gcb_soft_rst_status(struct ocelot *ocelot)
+{
+ int val;
+
+ regmap_field_read(ocelot->regfields[GCB_SOFT_RST_SWC_RST], &val);
+
+ return val;
+}
+
+static int vsc9959_sys_ram_init_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, SYS_RAM_INIT);
+}
+
+static int vsc9959_reset(struct ocelot *ocelot)
+{
+ int val, err;
+
+ /* soft-reset the switch core */
+ regmap_field_write(ocelot->regfields[GCB_SOFT_RST_SWC_RST], 1);
+
+ err = readx_poll_timeout(vsc9959_gcb_soft_rst_status, ocelot, val, !val,
+ VSC9959_GCB_RST_SLEEP, VSC9959_INIT_TIMEOUT);
+ if (err) {
+ dev_err(ocelot->dev, "timeout: switch core reset\n");
+ return err;
+ }
+
+ /* initialize the switch memory (takes ~40 us) */
+ ocelot_write(ocelot, SYS_RAM_INIT_RAM_INIT, SYS_RAM_INIT);
+ err = readx_poll_timeout(vsc9959_sys_ram_init_status, ocelot, val, !val,
+ VSC9959_SYS_RAMINIT_SLEEP,
+ VSC9959_INIT_TIMEOUT);
+ if (err) {
+ dev_err(ocelot->dev, "timeout: switch sram init\n");
+ return err;
+ }
+
+ /* enable switch core */
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+
+ return 0;
+}
+
+static const struct ocelot_ops vsc9959_ops = {
+ .reset = vsc9959_reset,
+};
+
+struct felix_info felix_info_vsc9959 = {
+ .target_io_res = vsc9959_target_io_res,
+ .port_io_res = vsc9959_port_io_res,
+ .regfields = vsc9959_regfields,
+ .map = vsc9959_regmap,
+ .ops = &vsc9959_ops,
+ .stats_layout = vsc9959_stats_layout,
+ .num_stats = ARRAY_SIZE(vsc9959_stats_layout),
+ .shared_queue_sz = 128 * 1024,
+ .num_ports = 6,
+ .pci_bar = 4,
+};
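
For orientation: a register's final MMIO address under the layout above is the
PCI BAR 4 base, plus the target block's resource start, plus the per-register
byte offset from the regmap. A minimal sketch assuming exactly the tables
above (vsc9959_reg_addr is a hypothetical helper, not driver API):

	/* e.g. SYS_PTP_STATUS: BAR4 + 0x0010000 ("sys") + 0x000f14 */
	static u64 vsc9959_reg_addr(u64 bar4_base, int target, u32 offset)
	{
		return bar4_base + vsc9959_target_io_res[target].start + offset;
	}
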
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 36c6ed98f8e7..e548289df31e 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -639,7 +639,8 @@ static int
qca8k_setup(struct dsa_switch *ds)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int ret, i, phy_mode = -1;
+ phy_interface_t phy_mode = PHY_INTERFACE_MODE_NA;
+ int ret, i;
u32 mask;
/* Make sure that port 0 is the cpu port */
@@ -661,10 +662,10 @@ qca8k_setup(struct dsa_switch *ds)
return ret;
/* Initialize CPU port pad mode (xMII type, delays...) */
- phy_mode = of_get_phy_mode(dsa_to_port(ds, QCA8K_CPU_PORT)->dn);
- if (phy_mode < 0) {
+ ret = of_get_phy_mode(dsa_to_port(ds, QCA8K_CPU_PORT)->dn, &phy_mode);
+ if (ret) {
pr_err("Can't find phy-mode for master device\n");
- return phy_mode;
+ return ret;
}
ret = qca8k_set_pad_ctrl(priv, QCA8K_CPU_PORT, phy_mode);
if (ret < 0)
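
This conversion pattern recurs throughout the series: of_get_phy_mode() now
returns an error code and hands the mode back through an out-parameter, which
it initializes to PHY_INTERFACE_MODE_NA. A minimal caller sketch (np is a
placeholder device_node pointer):

	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(np, &mode);
	if (err)
		return err;	/* mode was left as PHY_INTERFACE_MODE_NA */
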
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
index ffac0ea4e8d5..0fe1ae173aa1 100644
--- a/drivers/net/dsa/sja1105/Kconfig
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -28,6 +28,7 @@ config NET_DSA_SJA1105_TAS
bool "Support for the Time-Aware Scheduler on NXP SJA1105"
depends on NET_DSA_SJA1105 && NET_SCH_TAPRIO
depends on NET_SCH_TAPRIO=y || NET_DSA_SJA1105=m
+ depends on NET_DSA_SJA1105_PTP
help
This enables support for the TTEthernet-based egress scheduling
engine in the SJA1105 DSA driver, which is controlled using a
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 91063ed3ef1b..d801fc204d19 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -20,6 +20,11 @@
*/
#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
+typedef enum {
+ SPI_READ = 0,
+ SPI_WRITE = 1,
+} sja1105_spi_rw_mode_t;
+
#include "sja1105_tas.h"
#include "sja1105_ptp.h"
@@ -35,6 +40,8 @@ struct sja1105_regs {
u64 ptp_control;
u64 ptpclkval;
u64 ptpclkrate;
+ u64 ptpclkcorp;
+ u64 ptpschtm;
u64 ptpegr_ts[SJA1105_NUM_PORTS];
u64 pad_mii_tx[SJA1105_NUM_PORTS];
u64 pad_mii_id[SJA1105_NUM_PORTS];
@@ -71,15 +78,15 @@ struct sja1105_info {
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
const struct sja1105_regs *regs;
- int (*ptp_cmd)(const struct dsa_switch *ds,
- const struct sja1105_ptp_cmd *cmd);
- int (*reset_cmd)(const void *ctx, const void *data);
+ int (*reset_cmd)(struct dsa_switch *ds);
int (*setup_rgmii_delay)(const void *ctx, int port);
/* Prototypes from include/net/dsa.h */
int (*fdb_add_cmd)(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
int (*fdb_del_cmd)(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
+ void (*ptp_cmd_packing)(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
const char *name;
};
@@ -109,22 +116,27 @@ struct sja1105_spi_message {
u64 address;
};
-typedef enum {
- SPI_READ = 0,
- SPI_WRITE = 1,
-} sja1105_spi_rw_mode_t;
-
/* From sja1105_main.c */
-int sja1105_static_config_reload(struct sja1105_private *priv);
+enum sja1105_reset_reason {
+ SJA1105_VLAN_FILTERING = 0,
+ SJA1105_RX_HWTSTAMPING,
+ SJA1105_AGEING_TIME,
+ SJA1105_SCHEDULING,
+};
+
+int sja1105_static_config_reload(struct sja1105_private *priv,
+ enum sja1105_reset_reason reason);
/* From sja1105_spi.c */
int sja1105_xfer_buf(const struct sja1105_private *priv,
sja1105_spi_rw_mode_t rw, u64 reg_addr,
u8 *buf, size_t len);
int sja1105_xfer_u32(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value);
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
+ struct ptp_system_timestamp *ptp_sts);
int sja1105_xfer_u64(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value);
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
+ struct ptp_system_timestamp *ptp_sts);
int sja1105_static_config_upload(struct sja1105_private *priv);
int sja1105_inhibit_tx(const struct sja1105_private *priv,
unsigned long port_bitmap, bool tx_inhibited);
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 2ae84a9dea59..b60224c55244 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -584,8 +584,9 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
for_each_child_of_node(ports_node, child) {
struct device_node *phy_node;
- int phy_mode;
+ phy_interface_t phy_mode;
u32 index;
+ int err;
/* Get switch port number from DT */
if (of_property_read_u32(child, "reg", &index) < 0) {
@@ -596,8 +597,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
}
/* Get PHY mode from DT */
- phy_mode = of_get_phy_mode(child);
- if (phy_mode < 0) {
+ err = of_get_phy_mode(child, &phy_mode);
+ if (err) {
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
index);
@@ -1340,17 +1341,33 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
sja1105_bridge_member(ds, port, br, false);
}
+static const char * const sja1105_reset_reasons[] = {
+ [SJA1105_VLAN_FILTERING] = "VLAN filtering",
+ [SJA1105_RX_HWTSTAMPING] = "RX timestamping",
+ [SJA1105_AGEING_TIME] = "Ageing time",
+ [SJA1105_SCHEDULING] = "Time-aware scheduling",
+};
+
/* For situations where we need to change a setting at runtime that is only
* available through the static configuration, resetting the switch in order
* to upload the new static config is unavoidable. Back up the settings we
* modify at runtime (currently only MAC) and restore them after uploading,
* such that this operation is relatively seamless.
*/
-int sja1105_static_config_reload(struct sja1105_private *priv)
+int sja1105_static_config_reload(struct sja1105_private *priv,
+ enum sja1105_reset_reason reason)
{
+ struct ptp_system_timestamp ptp_sts_before;
+ struct ptp_system_timestamp ptp_sts_after;
struct sja1105_mac_config_entry *mac;
int speed_mbps[SJA1105_NUM_PORTS];
+ struct dsa_switch *ds = priv->ds;
+ s64 t1, t2, t3, t4;
+ s64 t12, t34;
int rc, i;
+ s64 now;
+
+ mutex_lock(&priv->mgmt_lock);
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
@@ -1364,10 +1381,41 @@ int sja1105_static_config_reload(struct sja1105_private *priv)
mac[i].speed = SJA1105_SPEED_AUTO;
}
+ /* No PTP operations can run right now */
+ mutex_lock(&priv->ptp_data.lock);
+
+ rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
+ if (rc < 0)
+ goto out_unlock_ptp;
+
/* Reset switch and send updated static configuration */
rc = sja1105_static_config_upload(priv);
if (rc < 0)
- goto out;
+ goto out_unlock_ptp;
+
+ rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
+ if (rc < 0)
+ goto out_unlock_ptp;
+
+ t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
+ t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
+ t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
+ t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
+ /* Mid point, corresponds to pre-reset PTPCLKVAL */
+ t12 = t1 + (t2 - t1) / 2;
+ /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
+ t34 = t3 + (t4 - t3) / 2;
+ /* Advance PTPCLKVAL by the time it took since its readout */
+ now += (t34 - t12);
+
+ __sja1105_ptp_adjtime(ds, now);
+
+out_unlock_ptp:
+ mutex_unlock(&priv->ptp_data.lock);
+
+ dev_info(priv->ds->dev,
+ "Reset switch and programmed static config. Reason: %s\n",
+ sja1105_reset_reasons[reason]);
/* Configure the CGU (PLLs) for MII and RMII PHYs.
* For these interfaces there is no dynamic configuration
@@ -1383,6 +1431,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv)
goto out;
}
out:
+ mutex_unlock(&priv->mgmt_lock);
+
return rc;
}
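
Worked example of the reconstruction above, with made-up numbers:

	t1 = 1000 ns, t2 = 1040 ns  ->  t12 = 1020 ns
	    (system time at which the pre-reset PTPCLKVAL, "now", was read)
	t3 = 9000 ns, t4 = 9060 ns  ->  t34 = 9030 ns
	    (system time at which PTPCLKVAL was rewritten as 0)

The switch spent t34 - t12 = 8010 ns in reset, so __sja1105_ptp_adjtime()
advances the clock to now + 8010 ns instead of restoring the stale readout.
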
@@ -1561,7 +1611,7 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
l2_lookup_params = table->entries;
l2_lookup_params->shared_learn = !enabled;
- rc = sja1105_static_config_reload(priv);
+ rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
if (rc)
dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
@@ -1833,7 +1883,7 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
l2_lookup_params->maxage = maxage;
- return sja1105_static_config_reload(priv);
+ return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
}
static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
@@ -1972,7 +2022,8 @@ static int sja1105_check_device_id(struct sja1105_private *priv)
u64 part_no;
int rc;
- rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id);
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
+ NULL);
if (rc < 0)
return rc;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 783100397f8a..54258a25031d 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*/
+#include <linux/spi/spi.h>
#include "sja1105.h"
/* The adjfine API clamps ppb between [-32,768,000, 32,768,000], and
@@ -101,7 +102,7 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
priv->tagger_data.stampable_skb = NULL;
}
- return sja1105_static_config_reload(priv);
+ return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
}
int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
@@ -192,42 +193,54 @@ int sja1105_get_ts_info(struct dsa_switch *ds, int port,
return 0;
}
-int sja1105et_ptp_cmd(const struct dsa_switch *ds,
- const struct sja1105_ptp_cmd *cmd)
+void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op)
{
- const struct sja1105_private *priv = ds->priv;
- const struct sja1105_regs *regs = priv->info->regs;
const int size = SJA1105_SIZE_PTP_CMD;
- u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
/* No need to keep this as part of the structure */
u64 valid = 1;
- sja1105_pack(buf, &valid, 31, 31, size);
- sja1105_pack(buf, &cmd->resptp, 2, 2, size);
- sja1105_pack(buf, &cmd->corrclk4ts, 1, 1, size);
- sja1105_pack(buf, &cmd->ptpclkadd, 0, 0, size);
+ sja1105_packing(buf, &valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->resptp, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->corrclk4ts, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
+}
+
+void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_PTP_CMD;
+ /* No need to keep this as part of the structure */
+ u64 valid = 1;
- return sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf,
- SJA1105_SIZE_PTP_CMD);
+ sja1105_packing(buf, &valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->resptp, 3, 3, size, op);
+ sja1105_packing(buf, &cmd->corrclk4ts, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
}
-int sja1105pqrs_ptp_cmd(const struct dsa_switch *ds,
- const struct sja1105_ptp_cmd *cmd)
+int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw)
{
const struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
- const int size = SJA1105_SIZE_PTP_CMD;
u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
- /* No need to keep this as part of the structure */
- u64 valid = 1;
+ int rc;
+
+ if (rw == SPI_WRITE)
+ priv->info->ptp_cmd_packing(buf, cmd, PACK);
+
+ rc = sja1105_xfer_buf(priv, rw, regs->ptp_control, buf,
+ SJA1105_SIZE_PTP_CMD);
- sja1105_pack(buf, &valid, 31, 31, size);
- sja1105_pack(buf, &cmd->resptp, 3, 3, size);
- sja1105_pack(buf, &cmd->corrclk4ts, 2, 2, size);
- sja1105_pack(buf, &cmd->ptpclkadd, 0, 0, size);
+ if (rw == SPI_READ)
+ priv->info->ptp_cmd_packing(buf, cmd, UNPACK);
- return sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf,
- SJA1105_SIZE_PTP_CMD);
+ return rc;
}
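
The helper is now symmetric: with SPI_READ it leaves the unpacked command
status in *cmd, which is how the TAS code below polls PTPSTRTSCH and
PTPSTOPSCH. A usage sketch under that assumption:

	struct sja1105_ptp_cmd cmd = {0};
	int rc;

	rc = sja1105_ptp_commit(ds, &cmd, SPI_READ);
	if (!rc && cmd.ptpstrtsch)
		dev_dbg(ds->dev, "schedule confirmed started\n");
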
/* The switch returns partial timestamps (24 bits for SJA1105 E/T, which wrap
@@ -335,19 +348,23 @@ static int sja1105_ptpegr_ts_poll(struct dsa_switch *ds, int port, u64 *ts)
}
/* Caller must hold ptp_data->lock */
-static int sja1105_ptpclkval_read(struct sja1105_private *priv, u64 *ticks)
+static int sja1105_ptpclkval_read(struct sja1105_private *priv, u64 *ticks,
+ struct ptp_system_timestamp *ptp_sts)
{
const struct sja1105_regs *regs = priv->info->regs;
- return sja1105_xfer_u64(priv, SPI_READ, regs->ptpclkval, ticks);
+ return sja1105_xfer_u64(priv, SPI_READ, regs->ptpclkval, ticks,
+ ptp_sts);
}
/* Caller must hold ptp_data->lock */
-static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks)
+static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks,
+ struct ptp_system_timestamp *ptp_sts)
{
const struct sja1105_regs *regs = priv->info->regs;
- return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpclkval, &ticks);
+ return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpclkval, &ticks,
+ ptp_sts);
}
#define rxtstamp_to_tagger(d) \
@@ -370,7 +387,7 @@ static void sja1105_rxtstamp_work(struct work_struct *work)
u64 ticks, ts;
int rc;
- rc = sja1105_ptpclkval_read(priv, &ticks);
+ rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
if (rc < 0) {
dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
kfree_skb(skb);
@@ -423,7 +440,7 @@ bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
return true;
}
-int sja1105_ptp_reset(struct dsa_switch *ds)
+static int sja1105_ptp_reset(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
@@ -433,26 +450,49 @@ int sja1105_ptp_reset(struct dsa_switch *ds)
mutex_lock(&ptp_data->lock);
cmd.resptp = 1;
+
dev_dbg(ds->dev, "Resetting PTP clock\n");
- rc = priv->info->ptp_cmd(ds, &cmd);
+ rc = sja1105_ptp_commit(ds, &cmd, SPI_WRITE);
+
+ sja1105_tas_clockstep(priv->ds);
mutex_unlock(&ptp_data->lock);
return rc;
}
-static int sja1105_ptp_gettime(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+/* Caller must hold ptp_data->lock */
+int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_private *priv = ds->priv;
+ u64 ticks;
+ int rc;
+
+ rc = sja1105_ptpclkval_read(priv, &ticks, ptp_sts);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ return rc;
+ }
+
+ *ns = sja1105_ticks_to_ns(ticks);
+
+ return 0;
+}
+
+static int sja1105_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *ptp_sts)
{
struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
- u64 ticks = 0;
+ u64 now = 0;
int rc;
mutex_lock(&ptp_data->lock);
- rc = sja1105_ptpclkval_read(priv, &ticks);
- *ts = ns_to_timespec64(sja1105_ticks_to_ns(ticks));
+ rc = __sja1105_ptp_gettimex(priv->ds, &now, ptp_sts);
+ *ts = ns_to_timespec64(now);
mutex_unlock(&ptp_data->lock);
@@ -470,28 +510,42 @@ static int sja1105_ptp_mode_set(struct sja1105_private *priv,
ptp_data->cmd.ptpclkadd = mode;
- return priv->info->ptp_cmd(priv->ds, &ptp_data->cmd);
+ return sja1105_ptp_commit(priv->ds, &ptp_data->cmd, SPI_WRITE);
}
/* Write to PTPCLKVAL while PTPCLKADD is 0 */
+int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_private *priv = ds->priv;
+ u64 ticks = ns_to_sja1105_ticks(ns);
+ int rc;
+
+ rc = sja1105_ptp_mode_set(priv, PTP_SET_MODE);
+ if (rc < 0) {
+ dev_err(priv->ds->dev, "Failed to put PTPCLK in set mode\n");
+ return rc;
+ }
+
+ rc = sja1105_ptpclkval_write(priv, ticks, ptp_sts);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ return rc;
+}
+
static int sja1105_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
- u64 ticks = ns_to_sja1105_ticks(timespec64_to_ns(ts));
+ u64 ns = timespec64_to_ns(ts);
int rc;
mutex_lock(&ptp_data->lock);
- rc = sja1105_ptp_mode_set(priv, PTP_SET_MODE);
- if (rc < 0) {
- dev_err(priv->ds->dev, "Failed to put PTPCLK in set mode\n");
- goto out;
- }
+ rc = __sja1105_ptp_settime(priv->ds, ns, NULL);
- rc = sja1105_ptpclkval_write(priv, ticks);
-out:
mutex_unlock(&ptp_data->lock);
return rc;
@@ -516,7 +570,10 @@ static int sja1105_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
mutex_lock(&ptp_data->lock);
- rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkrate, &clkrate32);
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkrate, &clkrate32,
+ NULL);
+
+ sja1105_tas_adjfreq(priv->ds);
mutex_unlock(&ptp_data->lock);
@@ -524,24 +581,35 @@ static int sja1105_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
}
/* Write to PTPCLKVAL while PTPCLKADD is 1 */
-static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta)
{
- struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
- struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ struct sja1105_private *priv = ds->priv;
s64 ticks = ns_to_sja1105_ticks(delta);
int rc;
- mutex_lock(&ptp_data->lock);
-
rc = sja1105_ptp_mode_set(priv, PTP_ADD_MODE);
if (rc < 0) {
dev_err(priv->ds->dev, "Failed to put PTPCLK in add mode\n");
- goto out;
+ return rc;
}
- rc = sja1105_ptpclkval_write(priv, ticks);
+ rc = sja1105_ptpclkval_write(priv, ticks, NULL);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ return rc;
+}
+
+static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = __sja1105_ptp_adjtime(priv->ds, delta);
-out:
mutex_unlock(&ptp_data->lock);
return rc;
@@ -558,7 +626,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
.name = "SJA1105 PHC",
.adjfine = sja1105_ptp_adjfine,
.adjtime = sja1105_ptp_adjtime,
- .gettime64 = sja1105_ptp_gettime,
+ .gettimex64 = sja1105_ptp_gettimex,
.settime64 = sja1105_ptp_settime,
.max_adj = SJA1105_MAX_ADJ_PPB,
};
@@ -604,7 +672,7 @@ void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
mutex_lock(&ptp_data->lock);
- rc = sja1105_ptpclkval_read(priv, &ticks);
+ rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
if (rc < 0) {
dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
kfree_skb(skb);
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 243f130374d2..470f44b76318 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -22,6 +22,8 @@ static inline s64 sja1105_ticks_to_ns(s64 ticks)
}
struct sja1105_ptp_cmd {
+ u64 ptpstrtsch; /* start schedule */
+ u64 ptpstopsch; /* stop schedule */
u64 resptp; /* reset */
u64 corrclk4ts; /* use the corrected clock for timestamps */
u64 ptpclkadd; /* enum sja1105_ptp_clk_mode */
@@ -39,11 +41,11 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds);
void sja1105_ptp_clock_unregister(struct dsa_switch *ds);
-int sja1105et_ptp_cmd(const struct dsa_switch *ds,
- const struct sja1105_ptp_cmd *cmd);
+void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
-int sja1105pqrs_ptp_cmd(const struct dsa_switch *ds,
- const struct sja1105_ptp_cmd *cmd);
+void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *ts);
@@ -51,8 +53,6 @@ int sja1105_get_ts_info(struct dsa_switch *ds, int port,
void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
struct sk_buff *clone);
-int sja1105_ptp_reset(struct dsa_switch *ds);
-
bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type);
@@ -63,6 +63,17 @@ int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *sts);
+
+int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts);
+
+int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta);
+
+int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw);
+
#else
struct sja1105_ptp_cmd;
@@ -87,14 +98,33 @@ static inline void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
{
}
-static inline int sja1105_ptp_reset(struct dsa_switch *ds)
+static inline int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *sts)
+{
+ return 0;
+}
+
+static inline int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ return 0;
+}
+
+static inline int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta)
+{
+ return 0;
+}
+
+static inline int sja1105_ptp_commit(struct dsa_switch *ds,
+ struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw)
{
return 0;
}
-#define sja1105et_ptp_cmd NULL
+#define sja1105et_ptp_cmd_packing NULL
-#define sja1105pqrs_ptp_cmd NULL
+#define sja1105pqrs_ptp_cmd_packing NULL
#define sja1105_get_ts_info NULL
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index ed02410a9366..29b127f3bf9c 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -42,9 +42,9 @@ sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
* - SPI_READ: creates and sends an SPI read message from absolute
* address reg_addr, writing @len bytes into *buf
*/
-int sja1105_xfer_buf(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr,
- u8 *buf, size_t len)
+static int sja1105_xfer(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u8 *buf,
+ size_t len, struct ptp_system_timestamp *ptp_sts)
{
struct sja1105_chunk chunk = {
.len = min_t(size_t, len, SJA1105_SIZE_SPI_MSG_MAXLEN),
@@ -81,6 +81,7 @@ int sja1105_xfer_buf(const struct sja1105_private *priv,
struct spi_transfer *chunk_xfer = sja1105_chunk_xfer(xfers, i);
struct spi_transfer *hdr_xfer = sja1105_hdr_xfer(xfers, i);
u8 *hdr_buf = sja1105_hdr_buf(hdr_bufs, i);
+ struct spi_transfer *ptp_sts_xfer;
struct sja1105_spi_message msg;
/* Populate the transfer's header buffer */
@@ -102,6 +103,26 @@ int sja1105_xfer_buf(const struct sja1105_private *priv,
chunk_xfer->tx_buf = chunk.buf;
chunk_xfer->len = chunk.len;
+ /* Request timestamping for the transfer. Instead of letting
+ * callers specify which byte they want to timestamp, we can
+ * make certain assumptions:
+ * - A read operation will request a software timestamp when
+ * what's being read is the PTP time. That is snapshotted by
+ * the switch hardware at the end of the command portion
+ * (hdr_xfer).
+ * - A write operation will request a software timestamp on
+ * actions that modify the PTP time. Taking clock stepping as
+ * an example, the switch writes the PTP time at the end of
+ * the data portion (chunk_xfer).
+ */
+ if (rw == SPI_READ)
+ ptp_sts_xfer = hdr_xfer;
+ else
+ ptp_sts_xfer = chunk_xfer;
+ ptp_sts_xfer->ptp_sts_word_pre = ptp_sts_xfer->len - 1;
+ ptp_sts_xfer->ptp_sts_word_post = ptp_sts_xfer->len - 1;
+ ptp_sts_xfer->ptp_sts = ptp_sts;
+
/* Calculate next chunk */
chunk.buf += chunk.len;
chunk.reg_addr += chunk.len / 4;
@@ -123,6 +144,13 @@ int sja1105_xfer_buf(const struct sja1105_private *priv,
return rc;
}
+int sja1105_xfer_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u8 *buf, size_t len)
+{
+ return sja1105_xfer(priv, rw, reg_addr, buf, len, NULL);
+}
+
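
The ptp_sts_word_pre/post fields above ask the SPI core to snapshot system
time around a single byte of the transfer (here the last one). In effect,
and glossing over the controller-specific details, the core brackets that
byte with the standard PTP helpers:

	ptp_read_system_prets(xfer->ptp_sts);	/* just before the byte */
	/* ... byte ptp_sts_word_pre..ptp_sts_word_post goes on the wire ... */
	ptp_read_system_postts(xfer->ptp_sts);	/* just after it completes */
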
/* If @rw is:
* - SPI_WRITE: creates and sends an SPI write message at absolute
* address reg_addr
@@ -133,7 +161,8 @@ int sja1105_xfer_buf(const struct sja1105_private *priv,
* CPU endianness and directly usable by software running on the core.
*/
int sja1105_xfer_u64(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value)
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
+ struct ptp_system_timestamp *ptp_sts)
{
u8 packed_buf[8];
int rc;
@@ -141,7 +170,7 @@ int sja1105_xfer_u64(const struct sja1105_private *priv,
if (rw == SPI_WRITE)
sja1105_pack(packed_buf, value, 63, 0, 8);
- rc = sja1105_xfer_buf(priv, rw, reg_addr, packed_buf, 8);
+ rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 8, ptp_sts);
if (rw == SPI_READ)
sja1105_unpack(packed_buf, value, 63, 0, 8);
@@ -151,7 +180,8 @@ int sja1105_xfer_u64(const struct sja1105_private *priv,
/* Same as above, but transfers only a 4 byte word */
int sja1105_xfer_u32(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value)
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
+ struct ptp_system_timestamp *ptp_sts)
{
u8 packed_buf[4];
u64 tmp;
@@ -165,7 +195,7 @@ int sja1105_xfer_u32(const struct sja1105_private *priv,
sja1105_pack(packed_buf, &tmp, 31, 0, 4);
}
- rc = sja1105_xfer_buf(priv, rw, reg_addr, packed_buf, 4);
+ rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 4, ptp_sts);
if (rw == SPI_READ) {
sja1105_unpack(packed_buf, &tmp, 31, 0, 4);
@@ -175,116 +205,34 @@ int sja1105_xfer_u32(const struct sja1105_private *priv,
return rc;
}
-/* Back-ported structure from UM11040 Table 112.
- * Reset control register (addr. 100440h)
- * In the SJA1105 E/T, only warm_rst and cold_rst are
- * supported (exposed in UM10944 as rst_ctrl), but the bit
- * offsets of warm_rst and cold_rst are actually reversed.
- */
-struct sja1105_reset_cmd {
- u64 switch_rst;
- u64 cfg_rst;
- u64 car_rst;
- u64 otp_rst;
- u64 warm_rst;
- u64 cold_rst;
- u64 por_rst;
-};
-
-static void
-sja1105et_reset_cmd_pack(void *buf, const struct sja1105_reset_cmd *reset)
+static int sja1105et_reset_cmd(struct dsa_switch *ds)
{
- const int size = SJA1105_SIZE_RESET_CMD;
-
- memset(buf, 0, size);
-
- sja1105_pack(buf, &reset->cold_rst, 3, 3, size);
- sja1105_pack(buf, &reset->warm_rst, 2, 2, size);
-}
-
-static void
-sja1105pqrs_reset_cmd_pack(void *buf, const struct sja1105_reset_cmd *reset)
-{
- const int size = SJA1105_SIZE_RESET_CMD;
-
- memset(buf, 0, size);
-
- sja1105_pack(buf, &reset->switch_rst, 8, 8, size);
- sja1105_pack(buf, &reset->cfg_rst, 7, 7, size);
- sja1105_pack(buf, &reset->car_rst, 5, 5, size);
- sja1105_pack(buf, &reset->otp_rst, 4, 4, size);
- sja1105_pack(buf, &reset->warm_rst, 3, 3, size);
- sja1105_pack(buf, &reset->cold_rst, 2, 2, size);
- sja1105_pack(buf, &reset->por_rst, 1, 1, size);
-}
-
-static int sja1105et_reset_cmd(const void *ctx, const void *data)
-{
- const struct sja1105_private *priv = ctx;
- const struct sja1105_reset_cmd *reset = data;
+ struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
- struct device *dev = priv->ds->dev;
- u8 packed_buf[SJA1105_SIZE_RESET_CMD];
-
- if (reset->switch_rst ||
- reset->cfg_rst ||
- reset->car_rst ||
- reset->otp_rst ||
- reset->por_rst) {
- dev_err(dev, "Only warm and cold reset is supported "
- "for SJA1105 E/T!\n");
- return -EINVAL;
- }
-
- if (reset->warm_rst)
- dev_dbg(dev, "Warm reset requested\n");
- if (reset->cold_rst)
- dev_dbg(dev, "Cold reset requested\n");
+ u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
+ const int size = SJA1105_SIZE_RESET_CMD;
+ u64 cold_rst = 1;
- sja1105et_reset_cmd_pack(packed_buf, reset);
+ sja1105_pack(packed_buf, &cold_rst, 3, 3, size);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
SJA1105_SIZE_RESET_CMD);
}
-static int sja1105pqrs_reset_cmd(const void *ctx, const void *data)
+static int sja1105pqrs_reset_cmd(struct dsa_switch *ds)
{
- const struct sja1105_private *priv = ctx;
- const struct sja1105_reset_cmd *reset = data;
+ struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
- struct device *dev = priv->ds->dev;
- u8 packed_buf[SJA1105_SIZE_RESET_CMD];
-
- if (reset->switch_rst)
- dev_dbg(dev, "Main reset for all functional modules requested\n");
- if (reset->cfg_rst)
- dev_dbg(dev, "Chip configuration reset requested\n");
- if (reset->car_rst)
- dev_dbg(dev, "Clock and reset control logic reset requested\n");
- if (reset->otp_rst)
- dev_dbg(dev, "OTP read cycle for reading product "
- "config settings requested\n");
- if (reset->warm_rst)
- dev_dbg(dev, "Warm reset requested\n");
- if (reset->cold_rst)
- dev_dbg(dev, "Cold reset requested\n");
- if (reset->por_rst)
- dev_dbg(dev, "Power-on reset requested\n");
-
- sja1105pqrs_reset_cmd_pack(packed_buf, reset);
+ u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
+ const int size = SJA1105_SIZE_RESET_CMD;
+ u64 cold_rst = 1;
+
+ sja1105_pack(packed_buf, &cold_rst, 2, 2, size);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
SJA1105_SIZE_RESET_CMD);
}
-static int sja1105_cold_reset(const struct sja1105_private *priv)
-{
- struct sja1105_reset_cmd reset = {0};
-
- reset.cold_rst = 1;
- return priv->info->reset_cmd(priv, &reset);
-}
-
int sja1105_inhibit_tx(const struct sja1105_private *priv,
unsigned long port_bitmap, bool tx_inhibited)
{
@@ -293,7 +241,7 @@ int sja1105_inhibit_tx(const struct sja1105_private *priv,
int rc;
rc = sja1105_xfer_u32(priv, SPI_READ, regs->port_control,
- &inhibit_cmd);
+ &inhibit_cmd, NULL);
if (rc < 0)
return rc;
@@ -303,7 +251,7 @@ int sja1105_inhibit_tx(const struct sja1105_private *priv,
inhibit_cmd &= ~port_bitmap;
return sja1105_xfer_u32(priv, SPI_WRITE, regs->port_control,
- &inhibit_cmd);
+ &inhibit_cmd, NULL);
}
struct sja1105_status {
@@ -429,7 +377,7 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
usleep_range(500, 1000);
do {
/* Put the SJA1105 in programming mode */
- rc = sja1105_cold_reset(priv);
+ rc = priv->info->reset_cmd(priv->ds);
if (rc < 0) {
dev_err(dev, "Failed to reset switch, retrying...\n");
continue;
@@ -481,12 +429,6 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
dev_info(dev, "Succeeded after %d tried\n", RETRIES - retries);
}
- rc = sja1105_ptp_reset(priv->ds);
- if (rc < 0)
- dev_err(dev, "Failed to reset PTP clock: %d\n", rc);
-
- dev_info(dev, "Reset switch and programmed static config\n");
-
out:
kfree(config_buf);
return rc;
@@ -515,9 +457,11 @@ static struct sja1105_regs sja1105et_regs = {
.rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
.rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
.ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
+ .ptpschtm = 0x12, /* Spans 0x12 to 0x13 */
.ptp_control = 0x17,
.ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
.ptpclkrate = 0x1A,
+ .ptpclkcorp = 0x1D,
};
static struct sja1105_regs sja1105pqrs_regs = {
@@ -545,9 +489,11 @@ static struct sja1105_regs sja1105pqrs_regs = {
.rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
.qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
.ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
+ .ptpschtm = 0x13, /* Spans 0x13 to 0x14 */
.ptp_control = 0x18,
.ptpclkval = 0x19,
.ptpclkrate = 0x1B,
+ .ptpclkcorp = 0x1E,
};
struct sja1105_info sja1105e_info = {
@@ -560,7 +506,7 @@ struct sja1105_info sja1105e_info = {
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
- .ptp_cmd = sja1105et_ptp_cmd,
+ .ptp_cmd_packing = sja1105et_ptp_cmd_packing,
.regs = &sja1105et_regs,
.name = "SJA1105E",
};
@@ -574,7 +520,7 @@ struct sja1105_info sja1105t_info = {
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
- .ptp_cmd = sja1105et_ptp_cmd,
+ .ptp_cmd_packing = sja1105et_ptp_cmd_packing,
.regs = &sja1105et_regs,
.name = "SJA1105T",
};
@@ -589,7 +535,7 @@ struct sja1105_info sja1105p_info = {
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
- .ptp_cmd = sja1105pqrs_ptp_cmd,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.regs = &sja1105pqrs_regs,
.name = "SJA1105P",
};
@@ -604,7 +550,7 @@ struct sja1105_info sja1105q_info = {
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
- .ptp_cmd = sja1105pqrs_ptp_cmd,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.regs = &sja1105pqrs_regs,
.name = "SJA1105Q",
};
@@ -619,7 +565,7 @@ struct sja1105_info sja1105r_info = {
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
- .ptp_cmd = sja1105pqrs_ptp_cmd,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.regs = &sja1105pqrs_regs,
.name = "SJA1105R",
};
@@ -635,6 +581,6 @@ struct sja1105_info sja1105s_info = {
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
- .ptp_cmd = sja1105pqrs_ptp_cmd,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.name = "SJA1105S",
};
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
index 33eca6a82ec5..26b925b5dace 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.c
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -10,6 +10,11 @@
#define SJA1105_TAS_MAX_DELTA BIT(19)
#define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0)
+#define work_to_sja1105_tas(d) \
+ container_of((d), struct sja1105_tas_data, tas_work)
+#define tas_to_sja1105(d) \
+ container_of((d), struct sja1105_private, tas_data)
+
/* This is not a preprocessor macro because the "ns" argument may or may not be
* s64 at caller side. This ensures it is properly type-cast before div_s64.
*/
@@ -18,6 +23,100 @@ static s64 ns_to_sja1105_delta(s64 ns)
return div_s64(ns, 200);
}
+static s64 sja1105_delta_to_ns(s64 delta)
+{
+ return delta * 200;
+}
+
+/* Calculate the first base_time in the future that satisfies this
+ * relationship:
+ *
+ * future_base_time = base_time + N x cycle_time >= now, or
+ *
+ * now - base_time
+ * N >= ---------------
+ * cycle_time
+ *
+ * Because N is an integer, the ceiling of the "a / b" ratio above is
+ * precisely the floor of "(a + b - 1) / b", which is easier to compute
+ * when only integer division is available.
+ */
+static s64 future_base_time(s64 base_time, s64 cycle_time, s64 now)
+{
+ s64 a, b, n;
+
+ if (base_time >= now)
+ return base_time;
+
+ a = now - base_time;
+ b = cycle_time;
+ n = div_s64(a + b - 1, b);
+
+ return base_time + n * cycle_time;
+}
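
A quick numeric check of the rounding identity: base_time = 2,
cycle_time = 5, now = 13 gives a = 11 and n = (11 + 5 - 1) / 5 = 3, so the
result is 2 + 3 * 5 = 17 >= 13, while 17 - 5 = 12 would already be in the
past.
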
+
+static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct dsa_switch *ds = priv->ds;
+ s64 earliest_base_time = S64_MAX;
+ s64 latest_base_time = 0;
+ s64 its_cycle_time = 0;
+ s64 max_cycle_time = 0;
+ int port;
+
+ tas_data->enabled = false;
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ const struct tc_taprio_qopt_offload *offload;
+
+ offload = tas_data->offload[port];
+ if (!offload)
+ continue;
+
+ tas_data->enabled = true;
+
+ if (max_cycle_time < offload->cycle_time)
+ max_cycle_time = offload->cycle_time;
+ if (latest_base_time < offload->base_time)
+ latest_base_time = offload->base_time;
+ if (earliest_base_time > offload->base_time) {
+ earliest_base_time = offload->base_time;
+ its_cycle_time = offload->cycle_time;
+ }
+ }
+
+ if (!tas_data->enabled)
+ return 0;
+
+ /* Roll the earliest base time over until it is in a comparable
+ * time base with the latest, then compare their deltas.
+ * We want to enforce that all ports' base times are within
+ * SJA1105_TAS_MAX_DELTA deltas (200 ns each) of one another.
+ */
+ earliest_base_time = future_base_time(earliest_base_time,
+ its_cycle_time,
+ latest_base_time);
+ while (earliest_base_time > latest_base_time)
+ earliest_base_time -= its_cycle_time;
+ if (latest_base_time - earliest_base_time >
+ sja1105_delta_to_ns(SJA1105_TAS_MAX_DELTA)) {
+ dev_err(ds->dev,
+ "Base times too far apart: min %llu max %llu\n",
+ earliest_base_time, latest_base_time);
+ return -ERANGE;
+ }
+
+ tas_data->earliest_base_time = earliest_base_time;
+ tas_data->max_cycle_time = max_cycle_time;
+
+ dev_dbg(ds->dev, "earliest base time %lld ns\n", earliest_base_time);
+ dev_dbg(ds->dev, "latest base time %lld ns\n", latest_base_time);
+ dev_dbg(ds->dev, "longest cycle time %lld ns\n", max_cycle_time);
+
+ return 0;
+}
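
Concretely, with made-up numbers: one port with base_time 0 and another
with base_time 1 ms, both with cycle_time 200 us, roll to a difference of
exactly 0 after future_base_time(), comfortably inside the allowed window
of SJA1105_TAS_MAX_DELTA deltas, i.e. BIT(19) * 200 ns, roughly 104.86 ms.
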
+
/* Lo and behold: the egress scheduler from hell.
*
 * At the hardware level, the Time-Aware Shaper holds a global linear array of
@@ -99,7 +198,11 @@ static int sja1105_init_scheduling(struct sja1105_private *priv)
int num_cycles = 0;
int cycle = 0;
int i, k = 0;
- int port;
+ int port, rc;
+
+ rc = sja1105_tas_set_runtime_params(priv);
+ if (rc < 0)
+ return rc;
/* Discard previous Schedule Table */
table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
@@ -184,11 +287,13 @@ static int sja1105_init_scheduling(struct sja1105_private *priv)
schedule_entry_points = table->entries;
/* Finally start populating the static config tables */
- schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_STANDALONE;
+ schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP;
schedule_entry_points_params->actsubsch = num_cycles - 1;
for (port = 0; port < SJA1105_NUM_PORTS; port++) {
const struct tc_taprio_qopt_offload *offload;
+ /* Relative base time */
+ s64 rbt;
offload = tas_data->offload[port];
if (!offload)
@@ -196,15 +301,21 @@ static int sja1105_init_scheduling(struct sja1105_private *priv)
schedule_start_idx = k;
schedule_end_idx = k + offload->num_entries - 1;
- /* TODO this is the base time for the port's subschedule,
- * relative to PTPSCHTM. But as we're using the standalone
- * clock source and not PTP clock as time reference, there's
- * little point in even trying to put more logic into this,
- * like preserving the phases between the subschedules of
- * different ports. We'll get all of that when switching to the
- * PTP clock source.
+ /* This is the base time expressed as a number of TAS ticks
+ * relative to PTPSCHTM, which we'll (perhaps improperly) call
+ * the operational base time.
+ */
+ rbt = future_base_time(offload->base_time,
+ offload->cycle_time,
+ tas_data->earliest_base_time);
+ rbt -= tas_data->earliest_base_time;
+ /* UM10944.pdf 4.2.2. Schedule Entry Points table says that
+ * delta cannot be zero, which is unfortunate. Advance all relative
+ * base times by 1 TAS delta, so that even the earliest base
+ * time becomes 1 in relative terms. Then start the operational
+ * base time (PTPSCHTM) one TAS delta earlier than planned.
*/
- entry_point_delta = 1;
+ entry_point_delta = ns_to_sja1105_delta(rbt) + 1;
schedule_entry_points[cycle].subschindx = cycle;
schedule_entry_points[cycle].delta = entry_point_delta;
@@ -352,7 +463,7 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
if (rc < 0)
return rc;
- return sja1105_static_config_reload(priv);
+ return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
}
/* The cycle time extension is the amount of time the last cycle from
@@ -400,11 +511,306 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
if (rc < 0)
return rc;
- return sja1105_static_config_reload(priv);
+ return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
+}
+
+static int sja1105_tas_check_running(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_ptp_cmd cmd = {0};
+ int rc;
+
+ rc = sja1105_ptp_commit(ds, &cmd, SPI_READ);
+ if (rc < 0)
+ return rc;
+
+ if (cmd.ptpstrtsch == 1)
+ /* Schedule successfully started */
+ tas_data->state = SJA1105_TAS_STATE_RUNNING;
+ else if (cmd.ptpstopsch == 1)
+ /* Schedule is stopped */
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+ else
+ /* Schedule is probably not configured with PTP clock source */
+ rc = -EINVAL;
+
+ return rc;
+}
+
+/* Write to PTPCLKCORP */
+static int sja1105_tas_adjust_drift(struct sja1105_private *priv,
+ u64 correction)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 ptpclkcorp = ns_to_sja1105_ticks(correction);
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkcorp,
+ &ptpclkcorp, NULL);
+}
+
+/* Write to PTPSCHTM */
+static int sja1105_tas_set_base_time(struct sja1105_private *priv,
+ u64 base_time)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u64 ptpschtm = ns_to_sja1105_ticks(base_time);
+
+ return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpschtm,
+ &ptpschtm, NULL);
+}
+
+static int sja1105_tas_start(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
+ struct dsa_switch *ds = priv->ds;
+ int rc;
+
+ dev_dbg(ds->dev, "Starting the TAS\n");
+
+ if (tas_data->state == SJA1105_TAS_STATE_ENABLED_NOT_RUNNING ||
+ tas_data->state == SJA1105_TAS_STATE_RUNNING) {
+ dev_err(ds->dev, "TAS already started\n");
+ return -EINVAL;
+ }
+
+ cmd->ptpstrtsch = 1;
+ cmd->ptpstopsch = 0;
+
+ rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
+ if (rc < 0)
+ return rc;
+
+ tas_data->state = SJA1105_TAS_STATE_ENABLED_NOT_RUNNING;
+
+ return 0;
+}
+
+static int sja1105_tas_stop(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
+ struct dsa_switch *ds = priv->ds;
+ int rc;
+
+ dev_dbg(ds->dev, "Stopping the TAS\n");
+
+ if (tas_data->state == SJA1105_TAS_STATE_DISABLED) {
+ dev_err(ds->dev, "TAS already disabled\n");
+ return -EINVAL;
+ }
+
+ cmd->ptpstopsch = 1;
+ cmd->ptpstrtsch = 0;
+
+ rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
+ if (rc < 0)
+ return rc;
+
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+
+ return 0;
+}
+
+/* The schedule engine and the PTP clock are driven by the same oscillator, and
+ * they run in parallel. But whilst the PTP clock can keep an absolute
+ * time-of-day, the schedule engine is only running in 'ticks' (25 ticks make
+ * up a delta, which is 200ns), and wrapping around at the end of each cycle.
+ * The schedule engine is started when the PTP clock reaches the PTPSCHTM time
+ * (in PTP domain).
+ * Because the PTP clock can be rate-corrected (accelerated or slowed down) by
+ * a software servo, and the schedule engine clock runs in parallel to the PTP
+ * clock, there is logic internal to the switch that periodically keeps the
+ * schedule engine from drifting away. The frequency with which this internal
+ * syntonization happens is the PTP clock correction period (PTPCLKCORP). It is
+ * a value also in the PTP clock domain, and is also rate-corrected.
+ * To be precise, during a correction period, there is logic to determine by
+ * how many scheduler clock ticks the PTP clock has drifted. At the end of each
+ * correction period/beginning of new one, the length of a delta is shrunk or
+ * expanded with an integer number of ticks, compared with the typical 25.
+ * So a delta lasts for 200ns (or 25 ticks) only on average.
+ * Sometimes it is longer, sometimes it is shorter. The internal syntonization
+ * logic can adjust for at most 5 ticks each 20 ticks.
+ *
+ * The first implication is that you should choose your schedule correction
+ * period to be an integer multiple of the schedule length. Preferably one.
+ * In case there are schedules of multiple ports active, then the correction
+ * period needs to be a multiple of them all. Given the restriction that the
+ * cycle times have to be multiples of one another anyway, this means the
+ * correction period can simply be the largest cycle time, hence the current
+ * choice. This way, the updates are always synchronous to the transmission
+ * cycle, and therefore predictable.
+ *
+ * The second implication is that at the beginning of a correction period, the
+ * first few deltas will be modulated in time, until the schedule engine is
+ * properly phase-aligned with the PTP clock. For this reason, you should place
+ * your best-effort traffic at the beginning of a cycle, and your
+ * time-triggered traffic afterwards.
+ *
+ * The third implication is that once the schedule engine is started, it can
+ * only adjust for so much drift within a correction period. In the servo you
+ * can only change the PTPCLKRATE, but not step the clock (PTPCLKADD). If you
+ * want to do the latter, you need to stop and restart the schedule engine,
+ * which is what the state machine handles.
+ */
+static void sja1105_tas_state_machine(struct work_struct *work)
+{
+ struct sja1105_tas_data *tas_data = work_to_sja1105_tas(work);
+ struct sja1105_private *priv = tas_to_sja1105(tas_data);
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct timespec64 base_time_ts, now_ts;
+ struct dsa_switch *ds = priv->ds;
+ struct timespec64 diff;
+ s64 base_time, now;
+ int rc = 0;
+
+ mutex_lock(&ptp_data->lock);
+
+ switch (tas_data->state) {
+ case SJA1105_TAS_STATE_DISABLED:
+ /* Can't do anything at all if clock is still being stepped */
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ)
+ break;
+
+ rc = sja1105_tas_adjust_drift(priv, tas_data->max_cycle_time);
+ if (rc < 0)
+ break;
+
+ rc = __sja1105_ptp_gettimex(ds, &now, NULL);
+ if (rc < 0)
+ break;
+
+ /* Plan to start the earliest schedule first. The others
+ * will be started in hardware, by way of their respective
+ * entry point deltas.
+ * Try our best to avoid fringe cases (race condition between
+ * ptpschtm and ptpstrtsch) by pushing the oper_base_time at
+ * least one second into the future. This is not ideal, but it
+ * only needs to buy us time until the sja1105_tas_start command
+ * below gets executed.
+ */
+ base_time = future_base_time(tas_data->earliest_base_time,
+ tas_data->max_cycle_time,
+ now + 1ull * NSEC_PER_SEC);
+ base_time -= sja1105_delta_to_ns(1);
+
+ rc = sja1105_tas_set_base_time(priv, base_time);
+ if (rc < 0)
+ break;
+
+ tas_data->oper_base_time = base_time;
+
+ rc = sja1105_tas_start(priv);
+ if (rc < 0)
+ break;
+
+ base_time_ts = ns_to_timespec64(base_time);
+ now_ts = ns_to_timespec64(now);
+
+ dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
+ base_time_ts.tv_sec, base_time_ts.tv_nsec,
+ now_ts.tv_sec, now_ts.tv_nsec);
+
+ break;
+
+ case SJA1105_TAS_STATE_ENABLED_NOT_RUNNING:
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
+ /* Clock was stepped... bad news for TAS */
+ sja1105_tas_stop(priv);
+ break;
+ }
+
+ /* Check if TAS has actually started, by comparing the
+ * scheduled start time with the SJA1105 PTP clock
+ */
+ rc = __sja1105_ptp_gettimex(ds, &now, NULL);
+ if (rc < 0)
+ break;
+
+ if (now < tas_data->oper_base_time) {
+ /* TAS has not started yet */
+ diff = ns_to_timespec64(tas_data->oper_base_time - now);
+ dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
+ diff.tv_sec, diff.tv_nsec);
+ break;
+ }
+
+ /* Time elapsed, what happened? */
+ rc = sja1105_tas_check_running(priv);
+ if (rc < 0)
+ break;
+
+ if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
+ /* Readback says the schedule did not start */
+ dev_err(ds->dev,
+ "TAS not started despite time elapsed\n");
+
+ break;
+
+ case SJA1105_TAS_STATE_RUNNING:
+ /* Clock was stepped... bad news for TAS */
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
+ sja1105_tas_stop(priv);
+ break;
+ }
+
+ rc = sja1105_tas_check_running(priv);
+ if (rc < 0)
+ break;
+
+ if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
+ dev_err(ds->dev, "TAS surprisingly stopped\n");
+
+ break;
+
+ default:
+ if (net_ratelimit())
+ dev_err(ds->dev, "TAS in an invalid state (incorrect use of API)!\n");
+ }
+
+ if (rc && net_ratelimit())
+ dev_err(ds->dev, "An operation returned %d\n", rc);
+
+ mutex_unlock(&ptp_data->lock);
+}
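
Read end-to-end, the intended trace (assuming a servo that steps the clock
once and afterwards only rate-corrects) is:

	clock step           -> TAS stopped, state = DISABLED
	first adjfreq        -> program PTPCLKCORP and PTPSCHTM, set
	                        ptpstrtsch = 1, state = ENABLED_NOT_RUNNING
	PTP time passes base -> ptpstrtsch reads back 1, state = RUNNING
	any later clock step -> sja1105_tas_stop(), back to DISABLED
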
+
+void sja1105_tas_clockstep(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ if (!tas_data->enabled)
+ return;
+
+ tas_data->last_op = SJA1105_PTP_CLOCKSTEP;
+ schedule_work(&tas_data->tas_work);
+}
+
+void sja1105_tas_adjfreq(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ if (!tas_data->enabled)
+ return;
+
+ /* No reason to schedule the workqueue, nothing changed */
+ if (tas_data->state == SJA1105_TAS_STATE_RUNNING)
+ return;
+
+ tas_data->last_op = SJA1105_PTP_ADJUSTFREQ;
+ schedule_work(&tas_data->tas_work);
}
void sja1105_tas_setup(struct dsa_switch *ds)
{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ INIT_WORK(&tas_data->tas_work, sja1105_tas_state_machine);
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+ tas_data->last_op = SJA1105_PTP_NONE;
}
void sja1105_tas_teardown(struct dsa_switch *ds)
@@ -413,6 +819,8 @@ void sja1105_tas_teardown(struct dsa_switch *ds)
struct tc_taprio_qopt_offload *offload;
int port;
+ cancel_work_sync(&priv->tas_data.tas_work);
+
for (port = 0; port < SJA1105_NUM_PORTS; port++) {
offload = priv->tas_data.offload[port];
if (!offload)
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h
index 0aad212d88b2..b226c3dfd5b1 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.h
+++ b/drivers/net/dsa/sja1105/sja1105_tas.h
@@ -8,8 +8,27 @@
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS)
+enum sja1105_tas_state {
+ SJA1105_TAS_STATE_DISABLED,
+ SJA1105_TAS_STATE_ENABLED_NOT_RUNNING,
+ SJA1105_TAS_STATE_RUNNING,
+};
+
+enum sja1105_ptp_op {
+ SJA1105_PTP_NONE,
+ SJA1105_PTP_CLOCKSTEP,
+ SJA1105_PTP_ADJUSTFREQ,
+};
+
struct sja1105_tas_data {
struct tc_taprio_qopt_offload *offload[SJA1105_NUM_PORTS];
+ enum sja1105_tas_state state;
+ enum sja1105_ptp_op last_op;
+ struct work_struct tas_work;
+ s64 earliest_base_time;
+ s64 oper_base_time;
+ u64 max_cycle_time;
+ bool enabled;
};
int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
@@ -19,6 +38,10 @@ void sja1105_tas_setup(struct dsa_switch *ds);
void sja1105_tas_teardown(struct dsa_switch *ds);
+void sja1105_tas_clockstep(struct dsa_switch *ds);
+
+void sja1105_tas_adjfreq(struct dsa_switch *ds);
+
#else
/* C doesn't allow empty structures, bah! */
@@ -36,6 +59,10 @@ static inline void sja1105_tas_setup(struct dsa_switch *ds) { }
static inline void sja1105_tas_teardown(struct dsa_switch *ds) { }
+static inline void sja1105_tas_clockstep(struct dsa_switch *ds) { }
+
+static inline void sja1105_tas_adjfreq(struct dsa_switch *ds) { }
+
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS) */
#endif /* _SJA1105_TAS_H */
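The state machine wired up above reacts to exactly two PTP notifications: a clock step invalidates a countdown (or an already running schedule), while a frequency adjustment is harmless once the schedule runs. A condensed plain-C model of those transitions; the names mirror the patch, but the helper itself is a hypothetical stand-in for sja1105_tas_state_machine(), not driver code:

    #include <stdbool.h>
    #include <stdio.h>

    enum tas_state { TAS_DISABLED, TAS_ENABLED_NOT_RUNNING, TAS_RUNNING };
    enum ptp_op { PTP_NONE, PTP_CLOCKSTEP, PTP_ADJUSTFREQ };

    static enum tas_state next_state(enum tas_state s, enum ptp_op op,
                                     bool past_base_time)
    {
            switch (s) {
            case TAS_ENABLED_NOT_RUNNING:
                    /* a step while counting down: re-check the clock; the
                     * schedule runs iff the oper base time has elapsed */
                    if (op == PTP_CLOCKSTEP && past_base_time)
                            return TAS_RUNNING;
                    return s;
            case TAS_RUNNING:
                    /* a step under a running schedule stops it (the work
                     * item restarts from scratch); a pure frequency
                     * adjustment never does */
                    if (op == PTP_CLOCKSTEP)
                            return TAS_DISABLED;
                    return s;
            default:
                    return s;
            }
    }

    int main(void)
    {
            printf("%d\n", next_state(TAS_RUNNING, PTP_CLOCKSTEP, false));
            return 0;
    }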
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 54e4d8b07f0e..3031a5fc5427 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -51,41 +51,15 @@ static void set_multicast_list(struct net_device *dev)
{
}
-struct pcpu_dstats {
- u64 tx_packets;
- u64 tx_bytes;
- struct u64_stats_sync syncp;
-};
-
static void dummy_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- int i;
-
- for_each_possible_cpu(i) {
- const struct pcpu_dstats *dstats;
- u64 tbytes, tpackets;
- unsigned int start;
-
- dstats = per_cpu_ptr(dev->dstats, i);
- do {
- start = u64_stats_fetch_begin_irq(&dstats->syncp);
- tbytes = dstats->tx_bytes;
- tpackets = dstats->tx_packets;
- } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
- stats->tx_bytes += tbytes;
- stats->tx_packets += tpackets;
- }
+ dev_lstats_read(dev, &stats->tx_packets, &stats->tx_bytes);
}
static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
- dstats->tx_packets++;
- dstats->tx_bytes += skb->len;
- u64_stats_update_end(&dstats->syncp);
+ dev_lstats_add(dev, skb->len);
skb_tx_timestamp(skb);
dev_kfree_skb(skb);
@@ -94,8 +68,8 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
- dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
- if (!dev->dstats)
+ dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
+ if (!dev->lstats)
return -ENOMEM;
return 0;
@@ -103,7 +77,7 @@ static int dummy_dev_init(struct net_device *dev)
static void dummy_dev_uninit(struct net_device *dev)
{
- free_percpu(dev->dstats);
+ free_percpu(dev->lstats);
}
static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
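dummy.c now delegates its per-cpu byte/packet accounting to the shared lstats helpers instead of a private pcpu_dstats. Roughly what those helpers do, sketched from the netdevice API this series introduces; treat the bodies as an approximation of the kernel's implementation, not a verbatim copy:

    #include <linux/netdevice.h>
    #include <linux/u64_stats_sync.h>

    /* writer, hot xmit path: lockless per-cpu update */
    static inline void dev_lstats_add_sketch(struct net_device *dev,
                                             unsigned int len)
    {
            struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);

            u64_stats_update_begin(&lstats->syncp);
            lstats->bytes += len;
            lstats->packets++;
            u64_stats_update_end(&lstats->syncp);
    }

    /* reader: fold every CPU's counters under the seqcount retry loop,
     * the very loop dummy_get_stats64() used to open-code */
    static void dev_lstats_read_sketch(struct net_device *dev,
                                       u64 *packets, u64 *bytes)
    {
            int cpu;

            *packets = 0;
            *bytes = 0;
            for_each_possible_cpu(cpu) {
                    const struct pcpu_lstats *lb = per_cpu_ptr(dev->lstats, cpu);
                    unsigned int start;
                    u64 tp, tb;

                    do {
                            start = u64_stats_fetch_begin_irq(&lb->syncp);
                            tp = lb->packets;
                            tb = lb->bytes;
                    } while (u64_stats_fetch_retry_irq(&lb->syncp, start));
                    *packets += tp;
                    *bytes += tb;
            }
    }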
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index bb032be7fe31..4cd53fc338b5 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -730,12 +730,12 @@ static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
struct device_node *np = priv->device->of_node;
- int ret = 0;
+ int ret;
- priv->phy_iface = of_get_phy_mode(np);
+ ret = of_get_phy_mode(np, &priv->phy_iface);
/* Avoid get phy addr and create mdio if no phy is present */
- if (!priv->phy_iface)
+ if (ret)
return 0;
/* try to get PHY address from device tree, use PHY autodetection if
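The altera hunk tracks an API change from this series: of_get_phy_mode() now returns an int error code and writes the parsed mode through an output pointer, so callers stop overloading the phy_interface_t value with negative errnos. A minimal caller sketch; the fallback policy is illustrative:

    #include <linux/of_net.h>
    #include <linux/phy.h>

    static phy_interface_t example_get_mode(struct device_node *np)
    {
            phy_interface_t iface;
            int err;

            err = of_get_phy_mode(np, &iface);
            if (err)        /* property absent or unparseable */
                    return PHY_INTERFACE_MODE_NA;

            return iface;
    }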
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 0020726db204..6e0a6e234483 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -4,15 +4,8 @@
# aQuantia Ethernet Controller AQtion Linux Driver
# Copyright(c) 2014-2017 aQuantia Corporation.
#
-# Contact Information: <rdc-drv@aquantia.com>
-# aQuantia Corporation, 105 E. Tasman Dr. San Jose, CA 95134, USA
-#
################################################################################
-#
-# Makefile for the AQtion(tm) Ethernet driver
-#
-
obj-$(CONFIG_AQTION) += atlantic.o
atlantic-objs := aq_main.o \
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 8c633caf79d2..f0c41f7408e5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -70,14 +70,11 @@
/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
-#define AQ_NIC_FC_OFF 0U
-#define AQ_NIC_FC_TX 1U
-#define AQ_NIC_FC_RX 2U
-#define AQ_NIC_FC_FULL 3U
-#define AQ_NIC_FC_AUTO 4U
-
#define AQ_CFG_FC_MODE AQ_NIC_FC_FULL
+/* Default WOL modes used on initialization */
+#define AQ_CFG_WOL_MODES WAKE_MAGIC
+
#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */
#define AQ_CFG_IS_AUTONEG_DEF 1U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 1ae8aabcc41a..a1f99bef4a68 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -18,7 +18,9 @@ static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- u32 regs_count = aq_nic_get_regs_count(aq_nic);
+ u32 regs_count;
+
+ regs_count = aq_nic_get_regs_count(aq_nic);
memset(p, 0, regs_count * sizeof(u32));
aq_nic_get_regs(aq_nic, regs, p);
@@ -27,7 +29,9 @@ static void aq_ethtool_get_regs(struct net_device *ndev,
static int aq_ethtool_get_regs_len(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- u32 regs_count = aq_nic_get_regs_count(aq_nic);
+ u32 regs_count;
+
+ regs_count = aq_nic_get_regs_count(aq_nic);
return regs_count * sizeof(u32);
}
@@ -92,11 +96,21 @@ static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
"Queue[%d] InErrors",
};
+static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = {
+ "DMASystemLoopback",
+ "PKTSystemLoopback",
+ "DMANetworkLoopback",
+ "PHYInternalLoopback",
+ "PHYExternalLoopback",
+};
+
static void aq_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
ARRAY_SIZE(aq_ethtool_queue_stat_names) *
@@ -107,11 +121,15 @@ static void aq_ethtool_stats(struct net_device *ndev,
static void aq_ethtool_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
- struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
- u32 firmware_version = aq_nic_get_fw_version(aq_nic);
- u32 regs_count = aq_nic_get_regs_count(aq_nic);
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+ u32 firmware_version;
+ u32 regs_count;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+ firmware_version = aq_nic_get_fw_version(aq_nic);
+ regs_count = aq_nic_get_regs_count(aq_nic);
strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver));
strlcat(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version));
@@ -132,12 +150,15 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
static void aq_ethtool_get_strings(struct net_device *ndev,
u32 stringset, u8 *data)
{
- int i, si;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
u8 *p = data;
+ int i, si;
- if (stringset == ETH_SS_STATS) {
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
memcpy(p, aq_ethtool_stat_names,
sizeof(aq_ethtool_stat_names));
p = p + sizeof(aq_ethtool_stat_names);
@@ -150,23 +171,63 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
p += ETH_GSTRING_LEN;
}
}
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(p, aq_ethtool_priv_flag_names,
+ sizeof(aq_ethtool_priv_flag_names));
+ break;
}
}
-static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
+static int aq_ethtool_set_phys_id(struct net_device *ndev,
+ enum ethtool_phys_id_state state)
{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_hw_s *hw = aq_nic->aq_hw;
int ret = 0;
+
+ if (!aq_nic->aq_fw_ops->led_control)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&aq_nic->fwreq_mutex);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ ret = aq_nic->aq_fw_ops->led_control(hw, AQ_HW_LED_BLINK |
+ AQ_HW_LED_BLINK << 2 | AQ_HW_LED_BLINK << 4);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ ret = aq_nic->aq_fw_ops->led_control(hw, AQ_HW_LED_DEFAULT);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return ret;
+}
+
+static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
+{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+ int ret = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
switch (stringset) {
case ETH_SS_STATS:
ret = ARRAY_SIZE(aq_ethtool_stat_names) +
cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
break;
+ case ETH_SS_PRIV_FLAGS:
+ ret = ARRAY_SIZE(aq_ethtool_priv_flag_names);
+ break;
default:
ret = -EOPNOTSUPP;
}
+
return ret;
}
@@ -178,7 +239,9 @@ static u32 aq_ethtool_get_rss_indir_size(struct net_device *ndev)
static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
return sizeof(cfg->aq_rss.hash_secret_key);
}
@@ -187,9 +250,11 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
unsigned int i = 0U;
+ cfg = aq_nic_get_cfg(aq_nic);
+
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
if (indir) {
@@ -199,6 +264,7 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
if (key)
memcpy(key, cfg->aq_rss.hash_secret_key,
sizeof(cfg->aq_rss.hash_secret_key));
+
return 0;
}
@@ -242,9 +308,11 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
u32 *rule_locs)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
int err = 0;
+ cfg = aq_nic_get_cfg(aq_nic);
+
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = cfg->vecs;
@@ -269,8 +337,8 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
static int aq_ethtool_set_rxnfc(struct net_device *ndev,
struct ethtool_rxnfc *cmd)
{
- int err = 0;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
@@ -291,7 +359,9 @@ static int aq_ethtool_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *coal)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON ||
cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) {
@@ -305,6 +375,7 @@ static int aq_ethtool_get_coalesce(struct net_device *ndev,
coal->rx_max_coalesced_frames = 1;
coal->tx_max_coalesced_frames = 1;
}
+
return 0;
}
@@ -312,7 +383,9 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev,
struct ethtool_coalesce *coal)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
/* This is not yet supported
*/
@@ -354,13 +427,12 @@ static void aq_ethtool_get_wol(struct net_device *ndev,
struct ethtool_wolinfo *wol)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
- wol->supported = WAKE_MAGIC;
- wol->wolopts = 0;
+ cfg = aq_nic_get_cfg(aq_nic);
- if (cfg->wol)
- wol->wolopts |= WAKE_MAGIC;
+ wol->supported = AQ_NIC_WOL_MODES;
+ wol->wolopts = cfg->wol;
}
static int aq_ethtool_set_wol(struct net_device *ndev,
@@ -368,14 +440,17 @@ static int aq_ethtool_set_wol(struct net_device *ndev,
{
struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
int err = 0;
- if (wol->wolopts & WAKE_MAGIC)
- cfg->wol |= AQ_NIC_WOL_ENABLED;
- else
- cfg->wol &= ~AQ_NIC_WOL_ENABLED;
- err = device_set_wakeup_enable(&pdev->dev, wol->wolopts);
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ if (wol->wolopts & ~AQ_NIC_WOL_MODES)
+ return -EOPNOTSUPP;
+
+ cfg->wol = wol->wolopts;
+
+ err = device_set_wakeup_enable(&pdev->dev, !!cfg->wol);
return err;
}
@@ -513,7 +588,7 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *pause)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- u32 fc = aq_nic->aq_nic_cfg.flow_control;
+ u32 fc = aq_nic->aq_nic_cfg.fc.req;
pause->autoneg = 0;
@@ -535,14 +610,14 @@ static int aq_ethtool_set_pauseparam(struct net_device *ndev,
return -EOPNOTSUPP;
if (pause->rx_pause)
- aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_RX;
+ aq_nic->aq_hw->aq_nic_cfg->fc.req |= AQ_NIC_FC_RX;
else
- aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_RX;
+ aq_nic->aq_hw->aq_nic_cfg->fc.req &= ~AQ_NIC_FC_RX;
if (pause->tx_pause)
- aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_TX;
+ aq_nic->aq_hw->aq_nic_cfg->fc.req |= AQ_NIC_FC_TX;
else
- aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX;
+ aq_nic->aq_hw->aq_nic_cfg->fc.req &= ~AQ_NIC_FC_TX;
mutex_lock(&aq_nic->fwreq_mutex);
err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw);
@@ -555,23 +630,28 @@ static void aq_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
- ring->rx_pending = aq_nic_cfg->rxds;
- ring->tx_pending = aq_nic_cfg->txds;
+ cfg = aq_nic_get_cfg(aq_nic);
- ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max;
- ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max;
+ ring->rx_pending = cfg->rxds;
+ ring->tx_pending = cfg->txds;
+
+ ring->rx_max_pending = cfg->aq_hw_caps->rxds_max;
+ ring->tx_max_pending = cfg->aq_hw_caps->txds_max;
}
static int aq_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring)
{
- int err = 0;
- bool ndev_running = false;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
- const struct aq_hw_caps_s *hw_caps = aq_nic_cfg->aq_hw_caps;
+ const struct aq_hw_caps_s *hw_caps;
+ bool ndev_running = false;
+ struct aq_nic_cfg_s *cfg;
+ int err = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+ hw_caps = cfg->aq_hw_caps;
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
err = -EOPNOTSUPP;
@@ -585,18 +665,18 @@ static int aq_set_ringparam(struct net_device *ndev,
aq_nic_free_vectors(aq_nic);
- aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
- aq_nic_cfg->rxds = min(aq_nic_cfg->rxds, hw_caps->rxds_max);
- aq_nic_cfg->rxds = ALIGN(aq_nic_cfg->rxds, AQ_HW_RXD_MULTIPLE);
+ cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
+ cfg->rxds = min(cfg->rxds, hw_caps->rxds_max);
+ cfg->rxds = ALIGN(cfg->rxds, AQ_HW_RXD_MULTIPLE);
- aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
- aq_nic_cfg->txds = min(aq_nic_cfg->txds, hw_caps->txds_max);
- aq_nic_cfg->txds = ALIGN(aq_nic_cfg->txds, AQ_HW_TXD_MULTIPLE);
+ cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
+ cfg->txds = min(cfg->txds, hw_caps->txds_max);
+ cfg->txds = ALIGN(cfg->txds, AQ_HW_TXD_MULTIPLE);
- for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < aq_nic_cfg->vecs;
+ for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < cfg->vecs;
aq_nic->aq_vecs++) {
aq_nic->aq_vec[aq_nic->aq_vecs] =
- aq_vec_alloc(aq_nic, aq_nic->aq_vecs, aq_nic_cfg);
+ aq_vec_alloc(aq_nic, aq_nic->aq_vecs, cfg);
if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) {
err = -ENOMEM;
goto err_exit;
@@ -609,12 +689,61 @@ err_exit:
return err;
}
+static u32 aq_get_msg_level(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ return aq_nic->msg_enable;
+}
+
+static void aq_set_msg_level(struct net_device *ndev, u32 data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ aq_nic->msg_enable = data;
+}
+
+static u32 aq_ethtool_get_priv_flags(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ return aq_nic->aq_nic_cfg.priv_flags;
+}
+
+static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+ u32 priv_flags;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+ priv_flags = cfg->priv_flags;
+
+ if (flags & ~AQ_PRIV_FLAGS_MASK)
+ return -EOPNOTSUPP;
+
+ cfg->priv_flags = flags;
+
+ if ((priv_flags ^ flags) & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
+ if (netif_running(ndev)) {
+ dev_close(ndev);
+
+ dev_open(ndev, NULL);
+ }
+ } else if ((priv_flags ^ flags) & AQ_HW_LOOPBACK_MASK) {
+ aq_nic_set_loopback(aq_nic);
+ }
+
+ return 0;
+}
+
const struct ethtool_ops aq_ethtool_ops = {
.get_link = aq_ethtool_get_link,
.get_regs_len = aq_ethtool_get_regs_len,
.get_regs = aq_ethtool_get_regs,
.get_drvinfo = aq_ethtool_get_drvinfo,
.get_strings = aq_ethtool_get_strings,
+ .set_phys_id = aq_ethtool_set_phys_id,
.get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
.get_wol = aq_ethtool_get_wol,
.set_wol = aq_ethtool_set_wol,
@@ -630,8 +759,12 @@ const struct ethtool_ops aq_ethtool_ops = {
.set_rxfh = aq_ethtool_set_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
.set_rxnfc = aq_ethtool_set_rxnfc,
+ .get_msglevel = aq_get_msg_level,
+ .set_msglevel = aq_set_msg_level,
.get_sset_count = aq_ethtool_get_sset_count,
.get_ethtool_stats = aq_ethtool_stats,
+ .get_priv_flags = aq_ethtool_get_priv_flags,
+ .set_priv_flags = aq_ethtool_set_priv_flags,
.get_link_ksettings = aq_ethtool_get_link_ksettings,
.set_link_ksettings = aq_ethtool_set_link_ksettings,
.get_coalesce = aq_ethtool_get_coalesce,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
index 632b5531db4a..6d5be5ebeb13 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
@@ -12,5 +12,6 @@
#include "aq_common.h"
extern const struct ethtool_ops aq_ethtool_ops;
+#define AQ_PRIV_FLAGS_MASK (AQ_HW_LOOPBACK_MASK)
#endif /* AQ_ETHTOOL_H */
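The private-flags machinery added by this patch spans three definitions that must stay in lock-step: the enum aq_priv_flags bit order (aq_hw.h, below), the string order in aq_ethtool_priv_flag_names[] (above), and AQ_PRIV_FLAGS_MASK here. A hypothetical compile-time guard, not part of the patch, that would pin the relationship:

    #include <linux/bits.h>
    #include <linux/build_bug.h>
    #include <linux/kernel.h>

    static inline void aq_priv_flags_check(void)
    {
            /* five loopback bits, numbered 0..4 in enum aq_priv_flags */
            BUILD_BUG_ON(AQ_PRIV_FLAGS_MASK !=
                         GENMASK(AQ_HW_LOOPBACK_PHYEXT_SYS,
                                 AQ_HW_LOOPBACK_DMA_SYS));
            /* one ethtool string per bit, in bit order */
            BUILD_BUG_ON(ARRAY_SIZE(aq_ethtool_priv_flag_names) !=
                         AQ_HW_LOOPBACK_PHYEXT_SYS + 1);
    }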
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 5246cf44ce51..cc70c606b6ef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -119,6 +119,23 @@ struct aq_stats_s {
#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
+#define AQ_HW_LED_BLINK 0x2U
+#define AQ_HW_LED_DEFAULT 0x0U
+
+enum aq_priv_flags {
+ AQ_HW_LOOPBACK_DMA_SYS,
+ AQ_HW_LOOPBACK_PKT_SYS,
+ AQ_HW_LOOPBACK_DMA_NET,
+ AQ_HW_LOOPBACK_PHYINT_SYS,
+ AQ_HW_LOOPBACK_PHYEXT_SYS,
+};
+
+#define AQ_HW_LOOPBACK_MASK (BIT(AQ_HW_LOOPBACK_DMA_SYS) |\
+ BIT(AQ_HW_LOOPBACK_PKT_SYS) |\
+ BIT(AQ_HW_LOOPBACK_DMA_NET) |\
+ BIT(AQ_HW_LOOPBACK_PHYINT_SYS) |\
+ BIT(AQ_HW_LOOPBACK_PHYEXT_SYS))
+
struct aq_hw_s {
atomic_t flags;
u8 rbl_enabled:1;
@@ -137,6 +154,7 @@ struct aq_hw_s {
atomic_t dpc;
u32 mbox_addr;
u32 rpc_addr;
+ u32 settings_addr;
u32 rpc_tid;
struct hw_atl_utils_fw_rpc rpc;
s64 ptp_clk_offset;
@@ -276,6 +294,8 @@ struct aq_hw_ops {
u64 *timestamp);
int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
+
+ int (*hw_set_loopback)(struct aq_hw_s *self, u32 mode, bool enable);
};
struct aq_fw_ops {
@@ -304,6 +324,10 @@ struct aq_fw_ops {
int (*set_flow_control)(struct aq_hw_s *self);
+ int (*led_control)(struct aq_hw_s *self, u32 mode);
+
+ int (*set_phyloopback)(struct aq_hw_s *self, u32 mode, bool enable);
+
int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
u8 *mac);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 9c7a226d81b6..7dbf49adcea6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -59,6 +59,7 @@ u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg)
u64 value = aq_hw_read_reg(hw, reg);
value |= (u64)aq_hw_read_reg(hw, reg + 4) << 32;
+
return value;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index a26d4a69efad..538f460a3da7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -53,8 +53,8 @@ struct net_device *aq_ndev_alloc(void)
static int aq_ndev_open(struct net_device *ndev)
{
- int err = 0;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
err = aq_nic_init(aq_nic);
if (err < 0)
@@ -74,19 +74,20 @@ static int aq_ndev_open(struct net_device *ndev)
err_exit:
if (err < 0)
- aq_nic_deinit(aq_nic);
+ aq_nic_deinit(aq_nic, true);
+
return err;
}
static int aq_ndev_close(struct net_device *ndev)
{
- int err = 0;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
err = aq_nic_stop(aq_nic);
if (err < 0)
goto err_exit;
- aq_nic_deinit(aq_nic);
+ aq_nic_deinit(aq_nic, true);
err_exit:
return err;
@@ -120,7 +121,9 @@ static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- int err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
+ int err;
+
+ err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
if (err < 0)
goto err_exit;
@@ -133,8 +136,8 @@ err_exit:
static int aq_ndev_set_features(struct net_device *ndev,
netdev_features_t features)
{
- bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
bool is_vlan_tx_insert = !!(features & NETIF_F_HW_VLAN_CTAG_TX);
+ bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
struct aq_nic_s *aq_nic = netdev_priv(ndev);
bool need_ndev_restart = false;
struct aq_nic_cfg_s *aq_cfg;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 433adc099e44..a17a4da7bc15 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -41,10 +41,6 @@ static void aq_nic_update_ndev_stats(struct aq_nic_s *self);
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
- struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
- struct aq_rss_parameters *rss_params = &cfg->aq_rss;
- int i = 0;
-
static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
@@ -52,6 +48,11 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
};
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ struct aq_rss_parameters *rss_params;
+ int i = 0;
+
+ rss_params = &cfg->aq_rss;
rss_params->hash_secret_key_size = sizeof(rss_key);
memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
@@ -78,7 +79,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_rss = AQ_CFG_IS_RSS_DEF;
cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
- cfg->flow_control = AQ_CFG_FC_MODE;
+ cfg->fc.req = AQ_CFG_FC_MODE;
+ cfg->wol = AQ_CFG_WOL_MODES;
cfg->mtu = AQ_CFG_MTU_DEF;
cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
@@ -142,10 +144,14 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
if (err)
return err;
+ if (self->aq_fw_ops->get_flow_control)
+ self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
+ self->aq_nic_cfg.fc.cur = fc;
+
if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
- pr_info("%s: link change old %d new %d\n",
- AQ_CFG_DRV_NAME, self->link_status.mbps,
- self->aq_hw->aq_link_status.mbps);
+ netdev_info(self->ndev, "%s: link change old %d new %d\n",
+ AQ_CFG_DRV_NAME, self->link_status.mbps,
+ self->aq_hw->aq_link_status.mbps);
aq_nic_update_interrupt_moderation_settings(self);
if (self->aq_ptp) {
@@ -159,8 +165,6 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
* on any link event.
* We should query FW whether it negotiated FC.
*/
- if (self->aq_fw_ops->get_flow_control)
- self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
if (self->aq_hw_ops->hw_set_fc)
self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
}
@@ -179,6 +183,7 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
netif_tx_disable(self->ndev);
aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
}
+
return 0;
}
@@ -193,6 +198,7 @@ static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
self->aq_hw_ops->hw_irq_enable(self->aq_hw,
BIT(self->aq_nic_cfg.link_irq_vec));
+
return IRQ_HANDLED;
}
@@ -223,7 +229,8 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
{
struct aq_nic_s *self = from_timer(self, t, service_timer);
- mod_timer(&self->service_timer, jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
+ mod_timer(&self->service_timer,
+ jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
aq_ndev_schedule_work(&self->service_task);
}
@@ -302,9 +309,11 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
NETIF_F_RXHASH | NETIF_F_SG |
NETIF_F_LRO | NETIF_F_TSO;
+ self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
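gso_partial_features is what lets the stack hand this NIC UDP super-packets: paired with NETIF_F_GSO_PARTIAL in the hardware feature set (added in hw_atl_b0.c further down), the core performs whatever header work the device cannot and leaves the segmentation proper to hardware. The wiring in sketch form, assuming standard netdev feature flags:

    /* advertise UDP GSO only via the "partial" path: the hardware
     * replicates headers verbatim, so the driver must fix up the UDP
     * length field itself (see aq_nic_map_skb) */
    ndev->hw_features |= NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_PARTIAL;
    ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;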
@@ -324,8 +333,8 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
int aq_nic_init(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
- int err = 0;
unsigned int i = 0U;
+ int err = 0;
self->power_state = AQ_HW_POWER_STATE_D0;
mutex_lock(&self->fwreq_mutex);
@@ -369,8 +378,8 @@ err_exit:
int aq_nic_start(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
- int err = 0;
unsigned int i = 0U;
+ int err = 0;
err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
@@ -404,6 +413,8 @@ int aq_nic_start(struct aq_nic_s *self)
INIT_WORK(&self->service_task, aq_nic_service_task);
+ aq_nic_set_loopback(self);
+
timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
aq_nic_service_timer_cb(&self->service_timer);
@@ -460,26 +471,45 @@ err_exit:
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring)
{
- unsigned int ret = 0U;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int frag_count = 0U;
- unsigned int dx = ring->sw_tail;
struct aq_ring_buff_s *first = NULL;
- struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
+ u8 ipver = ip_hdr(skb)->version;
+ struct aq_ring_buff_s *dx_buff;
bool need_context_tag = false;
+ unsigned int frag_count = 0U;
+ unsigned int ret = 0U;
+ unsigned int dx;
+ u8 l4proto = 0;
+
+ if (ipver == 4)
+ l4proto = ip_hdr(skb)->protocol;
+ else if (ipver == 6)
+ l4proto = ipv6_hdr(skb)->nexthdr;
+ dx = ring->sw_tail;
+ dx_buff = &ring->buff_ring[dx];
dx_buff->flags = 0U;
if (unlikely(skb_is_gso(skb))) {
dx_buff->mss = skb_shinfo(skb)->gso_size;
- dx_buff->is_gso = 1U;
+ if (l4proto == IPPROTO_TCP) {
+ dx_buff->is_gso_tcp = 1U;
+ dx_buff->len_l4 = tcp_hdrlen(skb);
+ } else if (l4proto == IPPROTO_UDP) {
+ dx_buff->is_gso_udp = 1U;
+ dx_buff->len_l4 = sizeof(struct udphdr);
+ /* For UDP GSO, the hardware does not rewrite the packet length. */
+ udp_hdr(skb)->len = htons(dx_buff->mss +
+ dx_buff->len_l4);
+ } else {
+ WARN_ONCE(true, "Bad GSO mode");
+ goto exit;
+ }
dx_buff->len_pkt = skb->len;
dx_buff->len_l2 = ETH_HLEN;
- dx_buff->len_l3 = ip_hdrlen(skb);
- dx_buff->len_l4 = tcp_hdrlen(skb);
+ dx_buff->len_l3 = skb_network_header_len(skb);
dx_buff->eop_index = 0xffffU;
- dx_buff->is_ipv6 =
- (ip_hdr(skb)->version == 6) ? 1U : 0U;
+ dx_buff->is_ipv6 = (ipver == 6);
need_context_tag = true;
}
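The udp_hdr(skb)->len fix-up above compensates for hardware that copies the template UDP header into every segment unchanged. The arithmetic, as a trivially runnable check with illustrative values:

    #include <assert.h>

    int main(void)
    {
            unsigned int gso_size = 1400;   /* skb_shinfo(skb)->gso_size */
            unsigned int udphdr_len = 8;    /* sizeof(struct udphdr) */

            /* each full segment must advertise payload + header bytes;
             * the driver pre-writes this into the template header since
             * the NIC replicates it into every segment verbatim */
            assert(gso_size + udphdr_len == 1408);
            return 0;
    }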
@@ -513,24 +543,9 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
++ret;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
- 1U : 0U;
-
- if (ip_hdr(skb)->version == 4) {
- dx_buff->is_tcp_cso =
- (ip_hdr(skb)->protocol == IPPROTO_TCP) ?
- 1U : 0U;
- dx_buff->is_udp_cso =
- (ip_hdr(skb)->protocol == IPPROTO_UDP) ?
- 1U : 0U;
- } else if (ip_hdr(skb)->version == 6) {
- dx_buff->is_tcp_cso =
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
- 1U : 0U;
- dx_buff->is_udp_cso =
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
- 1U : 0U;
- }
+ dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol);
+ dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP);
+ dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP);
}
for (; nr_frags--; ++frag_count) {
@@ -585,7 +600,8 @@ mapping_error:
--ret, dx = aq_ring_next_dx(ring, dx)) {
dx_buff = &ring->buff_ring[dx];
- if (!dx_buff->is_gso && !dx_buff->is_vlan && dx_buff->pa) {
+ if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
+ !dx_buff->is_vlan && dx_buff->pa) {
if (unlikely(dx_buff->is_sop)) {
dma_unmap_single(aq_nic_get_dev(self),
dx_buff->pa,
@@ -606,11 +622,11 @@ exit:
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
+ unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
struct aq_ring_s *ring = NULL;
unsigned int frags = 0U;
- unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
- unsigned int tc = 0U;
int err = NETDEV_TX_OK;
+ unsigned int tc = 0U;
frags = skb_shinfo(skb)->nr_frags + 1;
@@ -623,6 +639,11 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
aq_ring_update_queue_state(ring);
+ if (self->aq_nic_cfg.priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
+ err = NETDEV_TX_BUSY;
+ goto err_exit;
+ }
+
/* Above status update may stop the queue. Check this. */
if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
err = NETDEV_TX_BUSY;
@@ -703,6 +724,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
if (err < 0)
return err;
}
+
return aq_nic_set_packet_filter(self, packet_filter);
}
@@ -747,10 +769,10 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
- unsigned int i = 0U;
- unsigned int count = 0U;
struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
+ unsigned int count = 0U;
+ unsigned int i = 0U;
if (self->aq_fw_ops->update_stats) {
mutex_lock(&self->fwreq_mutex);
@@ -800,8 +822,8 @@ err_exit:;
static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
- struct net_device *ndev = self->ndev;
struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
+ struct net_device *ndev = self->ndev;
ndev->stats.rx_packets = stats->dma_pkt_rc;
ndev->stats.rx_bytes = stats->dma_oct_rc;
@@ -846,9 +868,12 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Full);
- if (self->aq_nic_cfg.aq_hw_caps->flow_control)
+ if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
ethtool_link_ksettings_add_link_mode(cmd, supported,
Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ Asym_Pause);
+ }
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
@@ -882,13 +907,13 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Full);
- if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
+ if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
/* Asym is when either RX or TX, but not both */
- if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
- !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
+ if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
+ !!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Asym_Pause);
@@ -971,6 +996,44 @@ u32 aq_nic_get_fw_version(struct aq_nic_s *self)
return fw_version;
}
+int aq_nic_set_loopback(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (!self->aq_hw_ops->hw_set_loopback ||
+ !self->aq_fw_ops->set_phyloopback)
+ return -ENOTSUPP;
+
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_DMA_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_DMA_SYS)));
+
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PKT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PKT_SYS)));
+
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_DMA_NET,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_DMA_NET)));
+
+ self->aq_fw_ops->set_phyloopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PHYINT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PHYINT_SYS)));
+
+ self->aq_fw_ops->set_phyloopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PHYEXT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)));
+ mutex_unlock(&self->fwreq_mutex);
+
+ return 0;
+}
+
int aq_nic_stop(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
@@ -1000,7 +1063,20 @@ int aq_nic_stop(struct aq_nic_s *self)
return self->aq_hw_ops->hw_stop(self->aq_hw);
}
-void aq_nic_deinit(struct aq_nic_s *self)
+void aq_nic_set_power(struct aq_nic_s *self)
+{
+ if (self->power_state != AQ_HW_POWER_STATE_D0 ||
+ self->aq_hw->aq_nic_cfg->wol)
+ if (likely(self->aq_fw_ops->set_power)) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->set_power(self->aq_hw,
+ self->power_state,
+ self->ndev->dev_addr);
+ mutex_unlock(&self->fwreq_mutex);
+ }
+}
+
+void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
{
struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
@@ -1017,23 +1093,12 @@ void aq_nic_deinit(struct aq_nic_s *self)
aq_ptp_ring_free(self);
aq_ptp_free(self);
- if (likely(self->aq_fw_ops->deinit)) {
+ if (likely(self->aq_fw_ops->deinit) && link_down) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->deinit(self->aq_hw);
mutex_unlock(&self->fwreq_mutex);
}
- if (self->power_state != AQ_HW_POWER_STATE_D0 ||
- self->aq_hw->aq_nic_cfg->wol)
- if (likely(self->aq_fw_ops->set_power)) {
- mutex_lock(&self->fwreq_mutex);
- self->aq_fw_ops->set_power(self->aq_hw,
- self->power_state,
- self->ndev->dev_addr);
- mutex_unlock(&self->fwreq_mutex);
- }
-
-
err_exit:;
}
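The deinit/set_power split gives every power-down path the same ordering: firmware deinit, which drops the link, is skipped whenever wake-on-LAN is armed, and the power-state transition always comes last. A hypothetical helper condensing the pattern that aq_nic_shutdown() and aq_suspend_common() now follow, with names taken from the patch:

    static void aq_power_down(struct aq_nic_s *self)
    {
            bool wol_armed = !!self->aq_hw->aq_nic_cfg->wol;

            /* keep the PHY link up when something may need to wake us */
            aq_nic_deinit(self, /* link_down = */ !wol_armed);
            aq_nic_set_power(self);
    }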
@@ -1054,44 +1119,6 @@ void aq_nic_free_vectors(struct aq_nic_s *self)
err_exit:;
}
-int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
-{
- int err = 0;
-
- if (!netif_running(self->ndev)) {
- err = 0;
- goto out;
- }
- rtnl_lock();
- if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
- self->power_state = AQ_HW_POWER_STATE_D3;
- netif_device_detach(self->ndev);
- netif_tx_stop_all_queues(self->ndev);
-
- err = aq_nic_stop(self);
- if (err < 0)
- goto err_exit;
-
- aq_nic_deinit(self);
- } else {
- err = aq_nic_init(self);
- if (err < 0)
- goto err_exit;
-
- err = aq_nic_start(self);
- if (err < 0)
- goto err_exit;
-
- netif_device_attach(self->ndev);
- netif_tx_start_all_queues(self->ndev);
- }
-
-err_exit:
- rtnl_unlock();
-out:
- return err;
-}
-
void aq_nic_shutdown(struct aq_nic_s *self)
{
int err = 0;
@@ -1108,7 +1135,8 @@ void aq_nic_shutdown(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
}
- aq_nic_deinit(self);
+ aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(self);
err_exit:
rtnl_unlock();
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index c2513b79b9e9..a752f8bb4b08 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -20,6 +20,18 @@ struct aq_vec_s;
struct aq_ptp_s;
enum aq_rx_filter_type;
+enum aq_fc_mode {
+ AQ_NIC_FC_OFF = 0,
+ AQ_NIC_FC_TX,
+ AQ_NIC_FC_RX,
+ AQ_NIC_FC_FULL,
+};
+
+struct aq_fc_info {
+ enum aq_fc_mode req;
+ enum aq_fc_mode cur;
+};
+
struct aq_nic_cfg_s {
const struct aq_hw_caps_s *aq_hw_caps;
u64 features;
@@ -34,7 +46,7 @@ struct aq_nic_cfg_s {
u32 rxpageorder;
u32 num_rss_queues;
u32 mtu;
- u32 flow_control;
+ struct aq_fc_info fc;
u32 link_speed_msk;
u32 wol;
u8 is_vlan_rx_strip;
@@ -46,6 +58,7 @@ struct aq_nic_cfg_s {
bool is_polling;
bool is_rss;
bool is_lro;
+ u32 priv_flags;
u8 tcs;
struct aq_rss_parameters aq_rss;
u32 eee_speeds;
@@ -60,7 +73,8 @@ struct aq_nic_cfg_s {
#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_NIC_FLAG_ERR_HW 0x80000000U
-#define AQ_NIC_WOL_ENABLED BIT(0)
+#define AQ_NIC_WOL_MODES (WAKE_MAGIC |\
+ WAKE_PHY)
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
@@ -70,8 +84,8 @@ struct aq_hw_rx_fl2 {
};
struct aq_hw_rx_fl3l4 {
- u8 active_ipv4;
- u8 active_ipv6:2;
+ u8 active_ipv4;
+ u8 active_ipv6:2;
u8 is_ipv6;
u8 reserved_count;
};
@@ -87,6 +101,7 @@ struct aq_hw_rx_fltrs_s {
struct aq_nic_s {
atomic_t flags;
+ u32 msg_enable;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
struct aq_hw_s *aq_hw;
@@ -141,7 +156,8 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
int aq_nic_stop(struct aq_nic_s *self);
-void aq_nic_deinit(struct aq_nic_s *self);
+void aq_nic_deinit(struct aq_nic_s *self, bool link_down);
+void aq_nic_set_power(struct aq_nic_s *self);
void aq_nic_free_hot_resources(struct aq_nic_s *self);
void aq_nic_free_vectors(struct aq_nic_s *self);
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
@@ -155,7 +171,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
const struct ethtool_link_ksettings *cmd);
struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
u32 aq_nic_get_fw_version(struct aq_nic_s *self);
-int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
+int aq_nic_set_loopback(struct aq_nic_s *self);
int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
void aq_nic_shutdown(struct aq_nic_s *self);
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type);
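Replacing the single flow_control word with struct aq_fc_info separates what the administrator requested (ethtool -A) from what autonegotiation actually produced, which the link-status path now snapshots from firmware on every check. Hypothetical accessors showing which field answers which question:

    /* requested policy: what ethtool -A asked us to program */
    static inline enum aq_fc_mode aq_fc_requested(const struct aq_nic_cfg_s *cfg)
    {
            return cfg->fc.req;
    }

    /* negotiated result: what ethtool should report as active */
    static inline bool aq_fc_rx_active(const struct aq_nic_cfg_s *cfg)
    {
            return !!(cfg->fc.cur & AQ_NIC_FC_RX);
    }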
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index e82c96b50373..2bb329606794 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -185,6 +185,7 @@ unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
return AQ_HW_IRQ_MSIX;
if (self->pdev->msi_enabled)
return AQ_HW_IRQ_MSI;
+
return AQ_HW_IRQ_LEGACY;
}
@@ -196,12 +197,12 @@ static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
static int aq_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
- struct aq_nic_s *self;
- int err;
struct net_device *ndev;
resource_size_t mmio_pa;
- u32 bar;
+ struct aq_nic_s *self;
u32 numvecs;
+ u32 bar;
+ int err;
err = pci_enable_device(pdev);
if (err)
@@ -311,6 +312,7 @@ err_ndev:
pci_release_regions(pdev);
err_pci_func:
pci_disable_device(pdev);
+
return err;
}
@@ -347,29 +349,98 @@ static void aq_pci_shutdown(struct pci_dev *pdev)
}
}
-static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
+static int aq_suspend_common(struct device *dev, bool deep)
{
- struct aq_nic_s *self = pci_get_drvdata(pdev);
+ struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
+
+ rtnl_lock();
+
+ nic->power_state = AQ_HW_POWER_STATE_D3;
+ netif_device_detach(nic->ndev);
+ netif_tx_stop_all_queues(nic->ndev);
- return aq_nic_change_pm_state(self, &pm_msg);
+ aq_nic_stop(nic);
+
+ if (deep) {
+ aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(nic);
+ }
+
+ rtnl_unlock();
+
+ return 0;
}
-static int aq_pci_resume(struct pci_dev *pdev)
+static int atl_resume_common(struct device *dev, bool deep)
{
- struct aq_nic_s *self = pci_get_drvdata(pdev);
- pm_message_t pm_msg = PMSG_RESTORE;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct aq_nic_s *nic;
+ int ret;
+
+ nic = pci_get_drvdata(pdev);
+
+ rtnl_lock();
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (deep) {
+ ret = aq_nic_init(nic);
+ if (ret)
+ goto err_exit;
+ }
+
+ ret = aq_nic_start(nic);
+ if (ret)
+ goto err_exit;
+
+ netif_device_attach(nic->ndev);
+ netif_tx_start_all_queues(nic->ndev);
+
+err_exit:
+ rtnl_unlock();
- return aq_nic_change_pm_state(self, &pm_msg);
+ return ret;
}
+static int aq_pm_freeze(struct device *dev)
+{
+ return aq_suspend_common(dev, false);
+}
+
+static int aq_pm_suspend_poweroff(struct device *dev)
+{
+ return aq_suspend_common(dev, true);
+}
+
+static int aq_pm_thaw(struct device *dev)
+{
+ return atl_resume_common(dev, false);
+}
+
+static int aq_pm_resume_restore(struct device *dev)
+{
+ return atl_resume_common(dev, true);
+}
+
+static const struct dev_pm_ops aq_pm_ops = {
+ .suspend = aq_pm_suspend_poweroff,
+ .poweroff = aq_pm_suspend_poweroff,
+ .freeze = aq_pm_freeze,
+ .resume = aq_pm_resume_restore,
+ .restore = aq_pm_resume_restore,
+ .thaw = aq_pm_thaw,
+};
+
static struct pci_driver aq_pci_ops = {
.name = AQ_CFG_DRV_NAME,
.id_table = aq_pci_tbl,
.probe = aq_pci_probe,
.remove = aq_pci_remove,
- .suspend = aq_pci_suspend,
- .resume = aq_pci_resume,
.shutdown = aq_pci_shutdown,
+#ifdef CONFIG_PM
+ .driver.pm = &aq_pm_ops,
+#endif
};
int aq_pci_func_register_driver(void)
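The legacy pci_driver .suspend/.resume pair collapsed every PM event into one code path; dev_pm_ops lets hibernation's freeze/thaw stay shallow (quiesce traffic, keep the device initialized) while suspend/poweroff go deep (full deinit, wake sources armed). The same split in skeleton form, with hypothetical names:

    static const struct dev_pm_ops foo_pm_ops = {
            /* hibernation image creation: quiesce only */
            .freeze         = foo_pm_freeze,
            .thaw           = foo_pm_thaw,
            /* S3 suspend / S4 power-off: full deinit, wake sources armed */
            .suspend        = foo_pm_suspend,
            .resume         = foo_pm_resume,
            .poweroff       = foo_pm_suspend,
            .restore        = foo_pm_resume,
    };

    static struct pci_driver foo_driver = {
            /* legacy .suspend/.resume fields left unset on purpose */
            .driver.pm      = &foo_pm_ops,
    };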
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index 8175513e48c9..58e8c641e8b3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -1057,7 +1057,7 @@ static struct ptp_clock_info aq_ptp_clock = {
ptp_offset[__idx].ingress = (__ingress); } \
while (0)
-static void aq_ptp_offset_init_from_fw(const struct hw_aq_ptp_offset *offsets)
+static void aq_ptp_offset_init_from_fw(const struct hw_atl_ptp_offset *offsets)
{
int i;
@@ -1098,7 +1098,7 @@ static void aq_ptp_offset_init_from_fw(const struct hw_aq_ptp_offset *offsets)
}
}
-static void aq_ptp_offset_init(const struct hw_aq_ptp_offset *offsets)
+static void aq_ptp_offset_init(const struct hw_atl_ptp_offset *offsets)
{
memset(ptp_offset, 0, sizeof(ptp_offset));
@@ -1106,7 +1106,7 @@ static void aq_ptp_offset_init(const struct hw_aq_ptp_offset *offsets)
}
static void aq_ptp_gpio_init(struct ptp_clock_info *info,
- struct hw_aq_info *hw_info)
+ struct hw_atl_info *hw_info)
{
struct ptp_pin_desc pin_desc[MAX_PTP_GPIO_COUNT];
u32 extts_pin_cnt = 0;
@@ -1207,7 +1207,7 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
aq_ptp->ptp_info = aq_ptp_clock;
aq_ptp_gpio_init(&aq_ptp->ptp_info, &mbox.info);
clock = ptp_clock_register(&aq_ptp->ptp_info, &aq_nic->ndev->dev);
- if (!clock || IS_ERR(clock)) {
+ if (IS_ERR(clock)) {
netdev_err(aq_nic->ndev, "ptp_clock_register failed\n");
err = PTR_ERR(clock);
goto err_exit;
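The dropped "!clock ||" test was redundant here: with the PTP core built in, ptp_clock_register() reports failure only through ERR_PTR() and never returns NULL (a NULL return belongs to the CONFIG_PTP_1588_CLOCK=n stub). Assuming that configuration, the canonical check reduces to:

    clock = ptp_clock_register(&aq_ptp->ptp_info, &aq_nic->ndev->dev);
    if (IS_ERR(clock))
            return PTR_ERR(clock);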
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index f756cc0bbdf0..951d86f8b66e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -30,8 +30,8 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
struct device *dev)
{
struct page *page;
- dma_addr_t daddr;
int ret = -ENOMEM;
+ dma_addr_t daddr;
page = dev_alloc_pages(order);
if (unlikely(!page))
@@ -118,6 +118,7 @@ err_exit:
aq_ring_free(self);
self = NULL;
}
+
return self;
}
@@ -144,6 +145,7 @@ err_exit:
aq_ring_free(self);
self = NULL;
}
+
return self;
}
@@ -175,6 +177,7 @@ err_exit:
aq_ring_free(self);
self = NULL;
}
+
return self;
}
@@ -207,6 +210,7 @@ int aq_ring_init(struct aq_ring_s *self)
self->hw_head = 0;
self->sw_head = 0;
self->sw_tail = 0;
+
return 0;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index be3702a4dcc9..991e4d31b094 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -65,19 +65,20 @@ struct __packed aq_ring_buff_s {
};
union {
struct {
- u16 len;
+ u32 len:16;
u32 is_ip_cso:1;
u32 is_udp_cso:1;
u32 is_tcp_cso:1;
u32 is_cso_err:1;
u32 is_sop:1;
u32 is_eop:1;
- u32 is_gso:1;
+ u32 is_gso_tcp:1;
+ u32 is_gso_udp:1;
u32 is_mapped:1;
u32 is_cleaned:1;
u32 is_error:1;
u32 is_vlan:1;
- u32 rsvd3:5;
+ u32 rsvd3:4;
u16 eop_index;
u16 rsvd4;
};
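The repacked descriptor word stays within one 32-bit unit: 16 bits of len folded in from the former separate u16, twelve single-bit flags (is_gso split into is_gso_tcp/is_gso_udp), and a reserve shrunk from five bits to four. The budget, as a trivially runnable check:

    #include <assert.h>

    int main(void)
    {
            unsigned int len_bits  = 16;  /* u32 len:16 */
            unsigned int flag_bits = 12;  /* is_ip_cso .. is_vlan */
            unsigned int rsvd_bits = 4;   /* rsvd3, shrunk from 5 */

            assert(len_bits + flag_bits + rsvd_bits == 32);
            return 0;
    }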
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index a95c263a45aa..f40a427970dc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -103,8 +103,8 @@ err_exit:
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg)
{
- struct aq_vec_s *self = NULL;
struct aq_ring_s *ring = NULL;
+ struct aq_vec_s *self = NULL;
unsigned int i = 0U;
int err = 0;
@@ -159,6 +159,7 @@ err_exit:
aq_vec_free(self);
self = NULL;
}
+
return self;
}
@@ -263,6 +264,7 @@ void aq_vec_deinit(struct aq_vec_s *self)
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
}
+
err_exit:;
}
@@ -361,9 +363,9 @@ void aq_vec_add_stats(struct aq_vec_s *self,
int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
{
- unsigned int count = 0U;
struct aq_ring_stats_rx_s stats_rx;
struct aq_ring_stats_tx_s stats_tx;
+ unsigned int count = 0U;
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 359a4d387185..9b1062b8af64 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -119,10 +119,10 @@ err_exit:
static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
{
- u32 tc = 0U;
- u32 buff_size = 0U;
- unsigned int i_priority = 0U;
bool is_rx_flow_control = false;
+ unsigned int i_priority = 0U;
+ u32 buff_size = 0U;
+ u32 tc = 0U;
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -155,7 +155,7 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
/* QoS Rx buf size per TC */
tc = 0;
- is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
+ is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->fc.req);
buff_size = HW_ATL_A0_RXBUF_MAX;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@@ -180,9 +180,9 @@ static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
- int err = 0;
- unsigned int i = 0U;
unsigned int addr = 0U;
+ unsigned int i = 0U;
+ int err = 0;
u32 val;
for (i = 10, addr = 0U; i--; ++addr) {
@@ -207,12 +207,12 @@ err_exit:
static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
- u8 *indirection_table = rss_params->indirection_table;
- u32 i = 0U;
u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
- int err = 0;
+ u8 *indirection_table = rss_params->indirection_table;
u16 bitary[1 + (HW_ATL_A0_RSS_REDIRECTION_MAX *
HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
+ int err = 0;
+ u32 i = 0U;
u32 val;
memset(bitary, 0, sizeof(bitary));
@@ -321,9 +321,9 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
- int err = 0;
unsigned int h = 0U;
unsigned int l = 0U;
+ int err = 0;
if (!mac_addr) {
err = -EINVAL;
@@ -352,10 +352,9 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
-
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
int err = 0;
- struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_a0_hw_init_tx_path(self);
hw_atl_a0_hw_init_rx_path(self);
@@ -404,6 +403,7 @@ static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -411,6 +411,7 @@ static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -418,6 +419,7 @@ static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
hw_atl_tpb_tx_buff_en_set(self, 1);
hw_atl_rpb_rx_buff_en_set(self, 1);
+
return aq_hw_err_from_flags(self);
}
@@ -425,6 +427,7 @@ static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+
return 0;
}
@@ -435,8 +438,8 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_txd_s *txd = NULL;
unsigned int buff_pa_len = 0U;
- unsigned int pkt_len = 0U;
unsigned int frag_count = 0U;
+ unsigned int pkt_len = 0U;
bool is_gso = false;
buff = &ring->buff_ring[ring->sw_tail];
@@ -451,7 +454,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->sw_tail];
- if (buff->is_gso) {
+ if (buff->is_gso_tcp) {
txd->ctl |= (buff->len_l3 << 31) |
(buff->len_l2 << 24) |
HW_ATL_A0_TXD_CTL_CMD_TCP |
@@ -500,6 +503,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
}
hw_atl_a0_hw_tx_ring_tail_update(self, ring);
+
return aq_hw_err_from_flags(self);
}
@@ -507,8 +511,8 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
- u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
@@ -549,8 +553,8 @@ static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
- u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
aq_ring->idx);
@@ -599,8 +603,8 @@ static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- int err = 0;
unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
+ int err = 0;
if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
@@ -720,6 +724,7 @@ static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
(1U << HW_ATL_A0_ERR_INT));
+
return aq_hw_err_from_flags(self);
}
@@ -737,6 +742,7 @@ static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
*mask = hw_atl_itr_irq_statuslsw_get(self);
+
return aq_hw_err_from_flags(self);
}
@@ -859,6 +865,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
{
hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);
+
return aq_hw_err_from_flags(self);
}
@@ -866,6 +873,7 @@ static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -873,6 +881,7 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
+
return aq_hw_err_from_flags(self);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index c7297ca03624..58e891af6e09 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -43,7 +43,9 @@
NETIF_F_NTUPLE | \
NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_CTAG_RX | \
- NETIF_F_HW_VLAN_CTAG_TX, \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_GSO_UDP_L4 | \
+ NETIF_F_GSO_PARTIAL, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
@@ -107,14 +109,15 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
{
hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
+
return 0;
}
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
- u32 tc = 0U;
- u32 buff_size = 0U;
unsigned int i_priority = 0U;
+ u32 buff_size = 0U;
+ u32 tc = 0U;
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -167,7 +170,7 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
(1024U / 32U) * 50U) /
100U, tc);
- hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
+ hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
/* Init TC2 for PTP_RX */
tc = 2;
@@ -188,9 +191,9 @@ static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
- int err = 0;
- unsigned int i = 0U;
unsigned int addr = 0U;
+ unsigned int i = 0U;
+ int err = 0;
u32 val;
for (i = 10, addr = 0U; i--; ++addr) {
@@ -215,12 +218,12 @@ err_exit:
static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
- u8 *indirection_table = rss_params->indirection_table;
- u32 i = 0U;
u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
- int err = 0;
+ u8 *indirection_table = rss_params->indirection_table;
u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
+ int err = 0;
+ u32 i = 0U;
u32 val;
memset(bitary, 0, sizeof(bitary));
@@ -304,6 +307,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_itr_rsc_delay_set(self, 1U);
}
+
return aq_hw_err_from_flags(self);
}
@@ -382,9 +386,9 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
- int err = 0;
unsigned int h = 0U;
unsigned int l = 0U;
+ int err = 0;
if (!mac_addr) {
err = -EINVAL;
@@ -413,11 +417,10 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
-
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
int err = 0;
u32 val;
- struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_b0_hw_init_tx_path(self);
hw_atl_b0_hw_init_rx_path(self);
@@ -460,8 +463,10 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
/* Interrupts */
hw_atl_reg_gen_irq_map_set(self,
- ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
- ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
+ ((HW_ATL_B0_ERR_INT << 0x18) |
+ (1U << 0x1F)) |
+ ((HW_ATL_B0_ERR_INT << 0x10) |
+ (1U << 0x17)), 0U);
/* Enable link interrupt */
if (aq_nic_cfg->link_irq_vec)
@@ -478,6 +483,7 @@ static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -485,6 +491,7 @@ static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -492,6 +499,7 @@ static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
hw_atl_tpb_tx_buff_en_set(self, 1);
hw_atl_rpb_rx_buff_en_set(self, 1);
+
return aq_hw_err_from_flags(self);
}
@@ -499,6 +507,7 @@ static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+
return 0;
}
@@ -509,8 +518,8 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_txd_s *txd = NULL;
unsigned int buff_pa_len = 0U;
- unsigned int pkt_len = 0U;
unsigned int frag_count = 0U;
+ unsigned int pkt_len = 0U;
bool is_vlan = false;
bool is_gso = false;
@@ -526,8 +535,9 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->sw_tail];
- if (buff->is_gso) {
- txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
+ if (buff->is_gso_tcp || buff->is_gso_udp) {
+ if (buff->is_gso_tcp)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
txd->ctl |= (buff->len_l3 << 31) |
(buff->len_l2 << 24);
@@ -547,7 +557,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
txd->ctl |= buff->vlan_tx_tag << 4;
is_vlan = true;
}
- if (!buff->is_gso && !buff->is_vlan) {
+ if (!buff->is_gso_tcp && !buff->is_gso_udp && !buff->is_vlan) {
buff_pa_len = buff->len;
txd->buf_addr = buff->pa;
@@ -586,6 +596,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
}
hw_atl_b0_hw_tx_ring_tail_update(self, ring);
+
return aq_hw_err_from_flags(self);
}
@@ -593,9 +604,9 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
- u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
+ u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
@@ -636,8 +647,8 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
- u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
aq_ring->idx);
@@ -726,8 +737,10 @@ static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
+ unsigned int hw_head_;
int err = 0;
- unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
+
+ hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
@@ -843,6 +856,7 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
+
return aq_hw_err_from_flags(self);
}
@@ -852,12 +866,14 @@ static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
atomic_inc(&self->dpc);
+
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
*mask = hw_atl_itr_irq_statuslsw_get(self);
+
return aq_hw_err_from_flags(self);
}
@@ -866,8 +882,8 @@ static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
unsigned int packet_filter)
{
- unsigned int i = 0U;
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ unsigned int i = 0U;
hw_atl_rpfl2promiscuous_mode_en_set(self,
IS_FILTER_ENABLED(IFF_PROMISC));
@@ -905,29 +921,30 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
u32 count)
{
int err = 0;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
err = -EBADRQC;
goto err_exit;
}
- for (self->aq_nic_cfg->mc_list_count = 0U;
- self->aq_nic_cfg->mc_list_count < count;
- ++self->aq_nic_cfg->mc_list_count) {
- u32 i = self->aq_nic_cfg->mc_list_count;
+ for (cfg->mc_list_count = 0U;
+ cfg->mc_list_count < count;
+ ++cfg->mc_list_count) {
+ u32 i = cfg->mc_list_count;
u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
- hw_atl_rpfl2unicast_dest_addresslsw_set(self,
- l, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
+ HW_ATL_B0_MAC_MIN + i);
- hw_atl_rpfl2unicast_dest_addressmsw_set(self,
- h, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
+ HW_ATL_B0_MAC_MIN + i);
hw_atl_rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled),
+ (cfg->is_mc_list_enabled),
HW_ATL_B0_MAC_MIN + i);
}
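As a reading aid for the hunk above: the six MAC bytes are split across two 32-bit filter registers, bytes 0..1 into the MSW and bytes 2..5 into the LSW. A minimal standalone sketch of that packing (hypothetical helper, not part of the patch):

#include <stdint.h>

/* Pack a 6-byte MAC into the h/l register values used above. */
static void mac_to_filter_regs(const uint8_t mac[6],
                               uint32_t *h, uint32_t *l)
{
        *h = ((uint32_t)mac[0] << 8) | mac[1];
        *l = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
             ((uint32_t)mac[4] << 8) | mac[5];
}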
@@ -1054,6 +1071,7 @@ static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -1061,6 +1079,7 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -1427,6 +1446,31 @@ static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
+{
+ switch (mode) {
+ case AQ_HW_LOOPBACK_DMA_SYS:
+ hw_atl_tpb_tx_dma_sys_lbk_en_set(self, enable);
+ hw_atl_rpb_dma_sys_lbk_set(self, enable);
+ break;
+ case AQ_HW_LOOPBACK_PKT_SYS:
+ hw_atl_tpo_tx_pkt_sys_lbk_en_set(self, enable);
+ hw_atl_rpf_tpo_to_rpf_sys_lbk_set(self, enable);
+ break;
+ case AQ_HW_LOOPBACK_DMA_NET:
+ hw_atl_rpf_vlan_prom_mode_en_set(self, enable);
+ hw_atl_rpfl2promiscuous_mode_en_set(self, enable);
+ hw_atl_tpb_tx_tx_clk_gate_en_set(self, !enable);
+ hw_atl_tpb_tx_dma_net_lbk_en_set(self, enable);
+ hw_atl_rpb_dma_net_lbk_set(self, enable);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
@@ -1481,5 +1525,9 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.rx_extract_ts = hw_atl_b0_rx_extract_ts,
.extract_hwts = hw_atl_b0_extract_hwts,
.hw_set_offload = hw_atl_b0_hw_offload_set,
- .hw_set_fc = hw_atl_b0_set_fc,
+ .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
+ .hw_get_fw_version = hw_atl_utils_get_fw_version,
+ .hw_set_loopback = hw_atl_b0_set_loopback,
+ .hw_set_fc = hw_atl_b0_set_fc,
};
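A hedged usage sketch for the new hw_set_loopback op wired up above. The caller here is hypothetical (an ethtool self-test path would look roughly like this); only the op itself comes from the patch:

static int loopback_selftest(struct aq_hw_s *hw, const struct aq_hw_ops *ops)
{
        int err;

        err = ops->hw_set_loopback(hw, AQ_HW_LOOPBACK_DMA_SYS, true);
        if (err)
                return err;

        /* ... send and verify looped-back frames here ... */

        return ops->hw_set_loopback(hw, AQ_HW_LOOPBACK_DMA_SYS, false);
}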
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 6cadc9054544..d1f68fc16291 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -563,6 +563,13 @@ void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
}
+void hw_atl_rpb_dma_net_lbk_set(struct aq_hw_s *aq_hw, u32 dma_net_lbk)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_NET_LBK_ADR,
+ HW_ATL_RPB_DMA_NET_LBK_MSK,
+ HW_ATL_RPB_DMA_NET_LBK_SHIFT, dma_net_lbk);
+}
+
void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
u32 rx_traf_class_mode)
{
@@ -1341,7 +1348,26 @@ void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_
tx_dma_sys_lbk_en);
}
+void hw_atl_tpb_tx_dma_net_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_net_lbk_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_NET_LBK_ADR,
+ HW_ATL_TPB_DMA_NET_LBK_MSK,
+ HW_ATL_TPB_DMA_NET_LBK_SHIFT,
+ tx_dma_net_lbk_en);
+}
+
+void hw_atl_tpb_tx_tx_clk_gate_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_clk_gate_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_CLK_GATE_EN_ADR,
+ HW_ATL_TPB_TX_CLK_GATE_EN_MSK,
+ HW_ATL_TPB_TX_CLK_GATE_EN_SHIFT,
+ tx_clk_gate_en);
+}
+
void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_buff_size_per_tc, u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer),
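The aq_hw_write_reg_bit() setters added above all follow the same masked read-modify-write pattern. A minimal standalone model, assuming hypothetical reg_read()/reg_write() accessors:

#include <stdint.h>

extern uint32_t reg_read(uint32_t addr);          /* assumed accessor */
extern void reg_write(uint32_t addr, uint32_t v); /* assumed accessor */

static void write_reg_bitfield(uint32_t addr, uint32_t msk,
                               uint32_t shift, uint32_t val)
{
        uint32_t v = reg_read(addr);

        v &= ~msk;                 /* clear the field (this is the MSKN value) */
        v |= (val << shift) & msk; /* insert the new field value */
        reg_write(addr, v);
}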
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 5750b0c9cae7..62992b23c0e8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -288,6 +288,9 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
/* set dma system loopback */
void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
+/* set dma network loopback */
+void hw_atl_rpb_dma_net_lbk_set(struct aq_hw_s *aq_hw, u32 dma_net_lbk);
+
/* set rx traffic class mode */
void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
u32 rx_traf_class_mode);
@@ -629,6 +632,14 @@ void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
/* set tx dma system loopback enable */
void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
+/* set tx dma network loopback enable */
+void hw_atl_tpb_tx_dma_net_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_net_lbk_en);
+
+/* set tx clock gating enable */
+void hw_atl_tpb_tx_tx_clk_gate_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_clk_gate_en);
+
/* set tx packet buffer size (per tc) */
void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_buff_size_per_tc,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index ec3bcdcefc4d..18de2f7b8959 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -554,6 +554,24 @@
/* default value of bitfield dma_sys_loopback */
#define HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0
+/* rx dma_net_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_net_loopback".
+ * port="pif_rpb_dma_net_lbk_i"
+ */
+
+/* register address for bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_ADR 0x00005000
+/* bitmask for bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_MSK 0x00000010
+/* inverted bitmask for bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_MSKN 0xffffffef
+/* lower bit position of bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_SHIFT 4
+/* width of bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_WIDTH 1
+/* default value of bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_DEFAULT 0x0
+
/* rx rx_tc_mode bitfield definitions
* preprocessor definitions for the bitfield "rx_tc_mode".
* port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i"
@@ -2107,6 +2125,24 @@
/* default value of bitfield dma_sys_loopback */
#define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0
+/* tx dma_net_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_net_loopback".
+ * port="pif_tpb_dma_net_lbk_i"
+ */
+
+/* register address for bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_ADR 0x00007000
+/* bitmask for bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_MSK 0x00000010
+/* inverted bitmask for bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_MSKN 0xffffffef
+/* lower bit position of bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_SHIFT 4
+/* width of bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_WIDTH 1
+/* default value of bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_DEFAULT 0x0
+
/* tx tx{b}_buf_size[7:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
@@ -2144,6 +2180,24 @@
/* default value of bitfield tx_scp_ins_en */
#define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0
+/* tx tx_clk_gate_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_clk_gate_en".
+ * port="pif_tpb_clk_gate_en_i"
+ */
+
+/* register address for bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_ADR 0x00007900
+/* bitmask for bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_MSK 0x00000010
+/* inverted bitmask for bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_MSKN 0xffffffef
+/* lower bit position of bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_SHIFT 4
+/* width of bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_WIDTH 1
+/* default value of bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_DEFAULT 0x1
+
/* tx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
* port="pif_tpo_ipv4_chk_en_i"
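The ADR/MSK/MSKN/SHIFT/WIDTH tuples added in this header are mutually redundant, which makes them easy to sanity-check at compile time. A sketch of such checks (not in the patch) for one of the new bitfields:

#include <assert.h>
#include <stdint.h>

static_assert(HW_ATL_TPB_DMA_NET_LBK_MSKN ==
              (uint32_t)~HW_ATL_TPB_DMA_NET_LBK_MSK,
              "MSKN is the inverted mask");
static_assert(HW_ATL_TPB_DMA_NET_LBK_MSK ==
              (((1U << HW_ATL_TPB_DMA_NET_LBK_WIDTH) - 1) <<
               HW_ATL_TPB_DMA_NET_LBK_SHIFT),
              "mask matches ((1 << width) - 1) << shift");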
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 6fc5640065bd..8910b62e67ed 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -47,6 +47,11 @@
#define FORCE_FLASHLESS 0
+enum mcp_area {
+ MCP_AREA_CONFIG = 0x80000000,
+ MCP_AREA_SETTINGS = 0x20000000,
+};
+
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
@@ -87,6 +92,7 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
}
self->aq_fw_ops = *fw_ops;
err = self->aq_fw_ops->init(self);
+
return err;
}
@@ -237,9 +243,9 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
int hw_atl_utils_soft_reset(struct aq_hw_s *self)
{
- int k;
u32 boot_exit_code = 0;
u32 val;
+ int k;
for (k = 0; k < 1000; ++k) {
u32 flb_status = aq_hw_read_reg(self,
@@ -327,10 +333,75 @@ err_exit:
return err;
}
-int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt)
+static int hw_atl_utils_write_b1_mbox(struct aq_hw_s *self, u32 addr,
+ u32 *p, u32 cnt, enum mcp_area area)
{
+ u32 data_offset = 0;
+ u32 offset = addr;
+ int err = 0;
u32 val;
+
+ switch (area) {
+ case MCP_AREA_CONFIG:
+ offset -= self->rpc_addr;
+ break;
+
+ case MCP_AREA_SETTINGS:
+ offset -= self->settings_addr;
+ break;
+ }
+
+ offset = offset / sizeof(u32);
+
+ for (; data_offset < cnt; ++data_offset, ++offset) {
+ aq_hw_write_reg(self, 0x328, p[data_offset]);
+ aq_hw_write_reg(self, 0x32C,
+ (area | (0xFFFF & (offset * 4))));
+ hw_atl_mcp_up_force_intr_set(self, 1);
+ /* 1000 times by 10us = 10ms */
+ err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
+ self, val,
+ (val & 0xF0000000) !=
+ area,
+ 10U, 10000U);
+
+ if (err < 0)
+ break;
+ }
+
+ return err;
+}
+
+static int hw_atl_utils_write_b0_mbox(struct aq_hw_s *self, u32 addr,
+ u32 *p, u32 cnt)
+{
+ u32 offset = 0;
int err = 0;
+ u32 val;
+
+ aq_hw_write_reg(self, 0x208, addr);
+
+ for (; offset < cnt; ++offset) {
+ aq_hw_write_reg(self, 0x20C, p[offset]);
+ aq_hw_write_reg(self, 0x200, 0xC000);
+
+ err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
+ self, val,
+ (val & 0x100) == 0U,
+ 10U, 10000U);
+
+ if (err < 0)
+ break;
+ }
+
+ return err;
+}
+
+static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 addr, u32 *p,
+ u32 cnt, enum mcp_area area)
+{
+ int err = 0;
+ u32 val;
err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, self,
val, val == 1U,
@@ -338,54 +409,47 @@ int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt)
if (err < 0)
goto err_exit;
- if (IS_CHIP_FEATURE(REVISION_B1)) {
- u32 offset = 0;
-
- for (; offset < cnt; ++offset) {
- aq_hw_write_reg(self, 0x328, p[offset]);
- aq_hw_write_reg(self, 0x32C,
- (0x80000000 | (0xFFFF & (offset * 4))));
- hw_atl_mcp_up_force_intr_set(self, 1);
- /* 1000 times by 10us = 10ms */
- err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
- self, val,
- (val & 0xF0000000) !=
- 0x80000000,
- 10U, 10000U);
- }
- } else {
- u32 offset = 0;
-
- aq_hw_write_reg(self, 0x208, a);
+ if (IS_CHIP_FEATURE(REVISION_B1))
+ err = hw_atl_utils_write_b1_mbox(self, addr, p, cnt, area);
+ else
+ err = hw_atl_utils_write_b0_mbox(self, addr, p, cnt);
- for (; offset < cnt; ++offset) {
- aq_hw_write_reg(self, 0x20C, p[offset]);
- aq_hw_write_reg(self, 0x200, 0xC000);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
- err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
- self, val,
- (val & 0x100) == 0,
- 1000U, 10000U);
- }
- }
+ if (err < 0)
+ goto err_exit;
- hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ err = aq_hw_err_from_flags(self);
err_exit:
return err;
}
+int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt)
+{
+ return hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, p,
+ cnt, MCP_AREA_CONFIG);
+}
+
+int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
+ u32 cnt)
+{
+ return hw_atl_utils_fw_upload_dwords(self, self->settings_addr + offset,
+ p, cnt, MCP_AREA_SETTINGS);
+}
+
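Both mailbox writers above lean on readx_poll_timeout_atomic() from <linux/iopoll.h>. Roughly, the macro expands to a bounded poll loop like this sketch (a model only; the real macro also re-reads once after the deadline):

static int poll_mcp_ack(struct aq_hw_s *self, u32 area)
{
        u32 waited_us = 0;
        u32 val;

        for (;;) {
                val = hw_atl_scrpad12_get(self);
                if ((val & 0xF0000000) != area)
                        return 0;          /* firmware consumed the word */
                if (waited_us >= 10000U)
                        return -ETIMEDOUT; /* timeout_us exceeded */
                udelay(10);                /* delay_us between reads */
                waited_us += 10U;
        }
}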
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
{
- int err = 0;
const u32 dw_major_mask = 0xff000000U;
const u32 dw_minor_mask = 0x00ffffffU;
+ int err = 0;
err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0;
if (err < 0)
goto err_exit;
err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ?
-EOPNOTSUPP : 0;
+
err_exit:
return err;
}
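Worked examples for the reordered version check above: the major byte must match exactly, while the minor/build part of the actual version only has to be at least the expected one. Values are illustrative, not real firmware releases, and the sketch calls the static helper as if it were exported:

#include <assert.h>
#include <errno.h>

static void ver_match_examples(void)
{
        /* same major 0x03, actual build newer than expected: accepted */
        assert(hw_atl_utils_ver_match(0x0301005aU, 0x03010070U) == 0);
        /* same major, actual build older than expected: rejected */
        assert(hw_atl_utils_ver_match(0x0301005aU, 0x03010026U) == -EOPNOTSUPP);
        /* different major byte: rejected regardless of the rest */
        assert(hw_atl_utils_ver_match(0x0301005aU, 0x0201005aU) == -EOPNOTSUPP);
}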
@@ -430,17 +494,16 @@ struct aq_hw_atl_utils_fw_rpc_tid_s {
int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
{
- int err = 0;
struct aq_hw_atl_utils_fw_rpc_tid_s sw;
+ int err = 0;
if (!IS_CHIP_FEATURE(MIPS)) {
err = -1;
goto err_exit;
}
- err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
- (u32 *)(void *)&self->rpc,
- (rpc_size + sizeof(u32) -
- sizeof(u8)) / sizeof(u32));
+ err = hw_atl_write_fwcfg_dwords(self, (u32 *)(void *)&self->rpc,
+ (rpc_size + sizeof(u32) -
+ sizeof(u8)) / sizeof(u32));
if (err < 0)
goto err_exit;
@@ -455,9 +518,9 @@ err_exit:
int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
struct hw_atl_utils_fw_rpc **rpc)
{
- int err = 0;
struct aq_hw_atl_utils_fw_rpc_tid_s sw;
struct aq_hw_atl_utils_fw_rpc_tid_s fw;
+ int err = 0;
do {
sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
@@ -561,10 +624,10 @@ static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state)
{
- int err = 0;
- u32 transaction_id = 0;
- struct hw_atl_utils_mbox_header mbox;
u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+ struct hw_atl_utils_mbox_header mbox;
+ u32 transaction_id = 0;
+ int err = 0;
if (state == MPI_RESET) {
hw_atl_utils_mpi_read_mbox(self, &mbox);
@@ -592,20 +655,26 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
val |= state & HW_ATL_MPI_STATE_MSK;
aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
+
err_exit:
return err;
}
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
{
- u32 cp0x036C = hw_atl_utils_mpi_get_state(self);
- u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
struct aq_hw_link_status_s *link_status = &self->aq_link_status;
+ u32 mpi_state;
+ u32 speed;
- if (!link_speed_mask) {
+ mpi_state = hw_atl_utils_mpi_get_state(self);
+ speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
+ FW2X_RATE_2G5 | FW2X_RATE_5G |
+ FW2X_RATE_10G);
+
+ if (!speed) {
link_status->mbps = 0U;
} else {
- switch (link_speed_mask) {
+ switch (speed) {
case HAL_ATLANTIC_RATE_10G:
link_status->mbps = 10000U;
break;
@@ -638,14 +707,15 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
u8 *mac)
{
+ u32 mac_addr[2];
+ u32 efuse_addr;
int err = 0;
u32 h = 0U;
u32 l = 0U;
- u32 mac_addr[2];
if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
- unsigned int rnd = 0;
unsigned int ucp_0x370 = 0;
+ unsigned int rnd = 0;
get_random_bytes(&rnd, sizeof(unsigned int));
@@ -653,11 +723,10 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
}
- err = hw_atl_utils_fw_downld_dwords(self,
- aq_hw_read_reg(self, 0x00000374U) +
- (40U * 4U),
- mac_addr,
- ARRAY_SIZE(mac_addr));
+ efuse_addr = aq_hw_read_reg(self, 0x00000374U);
+
+ err = hw_atl_utils_fw_downld_dwords(self, efuse_addr + (40U * 4U),
+ mac_addr, ARRAY_SIZE(mac_addr));
if (err < 0) {
mac_addr[0] = 0U;
mac_addr[1] = 0U;
@@ -719,14 +788,15 @@ unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
default:
break;
}
+
return ret;
}
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
{
- u32 chip_features = 0U;
u32 val = hw_atl_reg_glb_mif_id_get(self);
u32 mif_rev = val & 0xFFU;
+ u32 chip_features = 0U;
if ((0xFU & mif_rev) == 1U) {
chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
@@ -753,13 +823,14 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
{
hw_atl_utils_mpi_set_speed(self, 0);
hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
+
return 0;
}
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
- struct hw_atl_utils_mbox mbox;
struct aq_stats_s *cs = &self->curr_stats;
+ struct hw_atl_utils_mbox mbox;
hw_atl_utils_mpi_read_stats(self, &mbox);
@@ -836,16 +907,19 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
regs_buff[i] = aq_hw_read_reg(self,
hw_atl_utils_hw_mac_regs[i]);
+
return 0;
}
int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
{
*fw_version = aq_hw_read_reg(self, 0x18U);
+
return 0;
}
-static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac)
+static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
+ u8 *mac)
{
struct hw_atl_utils_fw_rpc *prpc = NULL;
unsigned int rpc_size = 0U;
@@ -858,22 +932,26 @@ static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac)
memset(prpc, 0, sizeof(*prpc));
if (wol_enabled) {
- rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_wol);
+ rpc_size = offsetof(struct hw_atl_utils_fw_rpc, msg_wol_add) +
+ sizeof(prpc->msg_wol_add);
+
prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD;
- prpc->msg_wol.priority =
+ prpc->msg_wol_add.priority =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR;
- prpc->msg_wol.pattern_id =
+ prpc->msg_wol_add.pattern_id =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
- prpc->msg_wol.wol_packet_type =
+ prpc->msg_wol_add.packet_type =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT;
- ether_addr_copy((u8 *)&prpc->msg_wol.wol_pattern, mac);
+ ether_addr_copy((u8 *)&prpc->msg_wol_add.magic_packet_pattern,
+ mac);
} else {
- rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_del_id);
+ rpc_size = sizeof(prpc->msg_wol_remove) +
+ offsetof(struct hw_atl_utils_fw_rpc, msg_wol_remove);
prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL;
- prpc->msg_wol.pattern_id =
+ prpc->msg_wol_add.pattern_id =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
}
@@ -890,8 +968,8 @@ static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
unsigned int rpc_size = 0U;
int err = 0;
- if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) {
- err = aq_fw1x_set_wol(self, 1, mac);
+ if (self->aq_nic_cfg->wol & WAKE_MAGIC) {
+ err = aq_fw1x_set_wake_magic(self, 1, mac);
if (err < 0)
goto err_exit;
@@ -965,4 +1043,5 @@ const struct aq_fw_ops aq_fw_1x_ops = {
.set_flow_control = NULL,
.send_fw_request = NULL,
.enable_ptp = NULL,
+ .led_control = NULL,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index ee11b107f0a5..42f0c5c6ec2d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -70,104 +70,41 @@ struct __packed hw_atl_stats_s {
u32 dpc;
};
-union __packed ip_addr {
- struct {
- u8 addr[16];
- } v6;
- struct {
- u8 padding[12];
- u8 addr[4];
- } v4;
-};
-
-struct __packed hw_atl_utils_fw_rpc {
- u32 msg_id;
-
+struct __packed drv_msg_enable_wakeup {
union {
- struct {
- u32 pong;
- } msg_ping;
+ u32 pattern_mask;
struct {
- u8 mac_addr[6];
- u32 ip_addr_cnt;
+ u32 reason_arp_v4_pkt : 1;
+ u32 reason_ipv4_ping_pkt : 1;
+ u32 reason_ipv6_ns_pkt : 1;
+ u32 reason_ipv6_ping_pkt : 1;
+ u32 reason_link_up : 1;
+ u32 reason_link_down : 1;
+ u32 reason_maximum : 1;
+ };
+ };
- struct {
- union ip_addr addr;
- union ip_addr mask;
- } ip[1];
- } msg_arp;
+ union {
+ u32 offload_mask;
+ };
+};
- struct {
- u32 len;
- u8 packet[1514U];
- } msg_inject;
+struct __packed magic_packet_pattern_s {
+ u8 mac_addr[ETH_ALEN];
+};
- struct {
- u32 priority;
- u32 wol_packet_type;
- u32 pattern_id;
- u32 next_wol_pattern_offset;
-
- union {
- struct {
- u32 flags;
- u8 ipv4_source_address[4];
- u8 ipv4_dest_address[4];
- u16 tcp_source_port_number;
- u16 tcp_dest_port_number;
- } ipv4_tcp_syn_parameters;
-
- struct {
- u32 flags;
- u8 ipv6_source_address[16];
- u8 ipv6_dest_address[16];
- u16 tcp_source_port_number;
- u16 tcp_dest_port_number;
- } ipv6_tcp_syn_parameters;
-
- struct {
- u32 flags;
- } eapol_request_id_message_parameters;
-
- struct {
- u32 flags;
- u32 mask_offset;
- u32 mask_size;
- u32 pattern_offset;
- u32 pattern_size;
- } wol_bit_map_pattern;
-
- struct {
- u8 mac_addr[ETH_ALEN];
- } wol_magic_packet_patter;
- } wol_pattern;
- } msg_wol;
+struct __packed drv_msg_wol_add {
+ u32 priority;
+ u32 packet_type;
+ u32 pattern_id;
+ u32 next_pattern_offset;
- struct {
- union {
- u32 pattern_mask;
-
- struct {
- u32 reason_arp_v4_pkt : 1;
- u32 reason_ipv4_ping_pkt : 1;
- u32 reason_ipv6_ns_pkt : 1;
- u32 reason_ipv6_ping_pkt : 1;
- u32 reason_link_up : 1;
- u32 reason_link_down : 1;
- u32 reason_maximum : 1;
- };
- };
-
- union {
- u32 offload_mask;
- };
- } msg_enable_wakeup;
+ struct magic_packet_pattern_s magic_packet_pattern;
+};
- struct {
- u32 id;
- } msg_del_id;
- };
+struct __packed drv_msg_wol_remove {
+ u32 id;
};
struct __packed hw_atl_utils_mbox_header {
@@ -176,7 +113,7 @@ struct __packed hw_atl_utils_mbox_header {
u32 error;
};
-struct __packed hw_aq_ptp_offset {
+struct __packed hw_atl_ptp_offset {
u16 ingress_100;
u16 egress_100;
u16 ingress_1000;
@@ -189,6 +126,13 @@ struct __packed hw_aq_ptp_offset {
u16 egress_10000;
};
+struct __packed hw_atl_cable_diag {
+ u8 fault;
+ u8 distance;
+ u8 far_distance;
+ u8 reserved;
+};
+
enum gpio_pin_function {
GPIO_PIN_FUNCTION_NC,
GPIO_PIN_FUNCTION_VAUX_ENABLE,
@@ -204,14 +148,14 @@ enum gpio_pin_function {
GPIO_PIN_FUNCTION_SIZE
};
-struct __packed hw_aq_info {
+struct __packed hw_atl_info {
u8 reserved[6];
u16 phy_fault_code;
u16 phy_temperature;
u8 cable_len;
u8 reserved1;
- u32 cable_diag_data[4];
- struct hw_aq_ptp_offset ptp_offset;
+ struct hw_atl_cable_diag cable_diag_data[4];
+ struct hw_atl_ptp_offset ptp_offset;
u8 reserved2[12];
u32 caps_lo;
u32 caps_hi;
@@ -233,28 +177,25 @@ struct __packed hw_aq_info {
struct __packed hw_atl_utils_mbox {
struct hw_atl_utils_mbox_header header;
struct hw_atl_stats_s stats;
- struct hw_aq_info info;
+ struct hw_atl_info info;
};
-/* fw2x */
-typedef u32 fw_offset_t;
-
struct __packed offload_ip_info {
u8 v4_local_addr_count;
u8 v4_addr_count;
u8 v6_local_addr_count;
u8 v6_addr_count;
- fw_offset_t v4_addr;
- fw_offset_t v4_prefix;
- fw_offset_t v6_addr;
- fw_offset_t v6_prefix;
+ u32 v4_addr;
+ u32 v4_prefix;
+ u32 v6_addr;
+ u32 v6_prefix;
};
struct __packed offload_port_info {
u16 udp_port_count;
u16 tcp_port_count;
- fw_offset_t udp_port;
- fw_offset_t tcp_port;
+ u32 udp_port;
+ u32 tcp_port;
};
struct __packed offload_ka_info {
@@ -262,15 +203,15 @@ struct __packed offload_ka_info {
u16 v6_ka_count;
u32 retry_count;
u32 retry_interval;
- fw_offset_t v4_ka;
- fw_offset_t v6_ka;
+ u32 v4_ka;
+ u32 v6_ka;
};
struct __packed offload_rr_info {
u32 rr_count;
u32 rr_buf_len;
- fw_offset_t rr_id_x;
- fw_offset_t rr_buf;
+ u32 rr_id_x;
+ u32 rr_buf;
};
struct __packed offload_info {
@@ -287,6 +228,19 @@ struct __packed offload_info {
u8 buf[0];
};
+struct __packed hw_atl_utils_fw_rpc {
+ u32 msg_id;
+
+ union {
+ /* fw1x structures */
+ struct drv_msg_wol_add msg_wol_add;
+ struct drv_msg_wol_remove msg_wol_remove;
+ struct drv_msg_enable_wakeup msg_enable_wakeup;
+ /* fw2x structures */
+ struct offload_info fw2x_offloads;
+ };
+};
+
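With the union layout above, an RPC's wire size is the msg_id header plus only the member in use, which is what the offsetof() arithmetic in the .c hunks computes. A sketch for the WoL-add case:

#include <stddef.h>

/* Size of a WoL-add RPC: header up to the union, plus that one member. */
static size_t rpc_size_wol_add(void)
{
        return offsetof(struct hw_atl_utils_fw_rpc, msg_wol_add) +
               sizeof(((struct hw_atl_utils_fw_rpc *)0)->msg_wol_add);
}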
/* Mailbox FW Request interface */
struct __packed hw_fw_request_ptp_gpio_ctrl {
u32 index;
@@ -323,9 +277,54 @@ struct __packed hw_fw_request_iface {
};
};
+struct __packed hw_atl_utils_settings {
+ u32 mtu;
+ u32 downshift_retry_count;
+ u32 link_pause_frame_quanta_100m;
+ u32 link_pause_frame_threshold_100m;
+ u32 link_pause_frame_quanta_1g;
+ u32 link_pause_frame_threshold_1g;
+ u32 link_pause_frame_quanta_2p5g;
+ u32 link_pause_frame_threshold_2p5g;
+ u32 link_pause_frame_quanta_5g;
+ u32 link_pause_frame_threshold_5g;
+ u32 link_pause_frame_quanta_10g;
+ u32 link_pause_frame_threshold_10g;
+ u32 pfc_quanta_class_0;
+ u32 pfc_threshold_class_0;
+ u32 pfc_quanta_class_1;
+ u32 pfc_threshold_class_1;
+ u32 pfc_quanta_class_2;
+ u32 pfc_threshold_class_2;
+ u32 pfc_quanta_class_3;
+ u32 pfc_threshold_class_3;
+ u32 pfc_quanta_class_4;
+ u32 pfc_threshold_class_4;
+ u32 pfc_quanta_class_5;
+ u32 pfc_threshold_class_5;
+ u32 pfc_quanta_class_6;
+ u32 pfc_threshold_class_6;
+ u32 pfc_quanta_class_7;
+ u32 pfc_threshold_class_7;
+ u32 eee_link_down_timeout;
+ u32 eee_link_up_timeout;
+ u32 eee_max_link_drops;
+ u32 eee_rates_mask;
+ u32 wake_timer;
+ u32 thermal_shutdown_off_temp;
+ u32 thermal_shutdown_warning_temp;
+ u32 thermal_shutdown_cold_temp;
+ u32 msm_options;
+ u32 dac_cable_serdes_modes;
+ u32 media_detect;
+};
+
enum hw_atl_rx_action_with_traffic {
HW_ATL_RX_DISCARD,
HW_ATL_RX_HOST,
+ HW_ATL_RX_MNGMNT,
+ HW_ATL_RX_HOST_AND_MNGMNT,
+ HW_ATL_RX_WOL
};
struct aq_rx_filter_vlan {
@@ -407,20 +406,12 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
-#define HAL_ATLANTIC_UTILS_FW_MSG_PING 0x1U
-#define HAL_ATLANTIC_UTILS_FW_MSG_ARP 0x2U
-#define HAL_ATLANTIC_UTILS_FW_MSG_INJECT 0x3U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD 0x4U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR 0x10000000U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN 0x1U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT 0x2U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL 0x5U
#define HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP 0x6U
-#define HAL_ATLANTIC_UTILS_FW_MSG_MSM_PFC 0x7U
-#define HAL_ATLANTIC_UTILS_FW_MSG_PROVISIONING 0x8U
-#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_ADD 0x9U
-#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_DEL 0xAU
-#define HAL_ATLANTIC_UTILS_FW_MSG_CABLE_DIAG 0xDU
enum hw_atl_fw2x_rate {
FW2X_RATE_100M = 0x20,
@@ -605,7 +596,10 @@ struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
u32 *p, u32 cnt);
-int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt);
+int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt);
+
+int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
+ u32 cnt);
int hw_atl_utils_fw_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index f649ac949d06..97ebf849695f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -17,6 +17,7 @@
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
+#define HW_ATL_FW2X_MPI_LED_ADDR 0x31c
#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334
#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
@@ -34,12 +35,16 @@
#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL)
+#define HW_ATL_FW2X_CTRL_WAKE_ON_LINK BIT(CTRL_WAKE_ON_LINK)
#define HW_ATL_FW2X_CTRL_SLEEP_PROXY BIT(CTRL_SLEEP_PROXY)
#define HW_ATL_FW2X_CTRL_WOL BIT(CTRL_WOL)
#define HW_ATL_FW2X_CTRL_LINK_DROP BIT(CTRL_LINK_DROP)
#define HW_ATL_FW2X_CTRL_PAUSE BIT(CTRL_PAUSE)
#define HW_ATL_FW2X_CTRL_TEMPERATURE BIT(CTRL_TEMPERATURE)
#define HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE BIT(CTRL_ASYMMETRIC_PAUSE)
+#define HW_ATL_FW2X_CTRL_INT_LOOPBACK BIT(CTRL_INT_LOOPBACK)
+#define HW_ATL_FW2X_CTRL_EXT_LOOPBACK BIT(CTRL_EXT_LOOPBACK)
+#define HW_ATL_FW2X_CTRL_DOWNSHIFT BIT(CTRL_DOWNSHIFT)
#define HW_ATL_FW2X_CTRL_FORCE_RECONNECT BIT(CTRL_FORCE_RECONNECT)
#define HW_ATL_FW2X_CAP_EEE_1G_MASK BIT(CAPS_HI_1000BASET_FD_EEE)
@@ -50,6 +55,9 @@
#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8
#define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E
+#define HW_ATL_FW_VER_LED 0x03010026U
+#define HW_ATL_FW_VER_MEDIA_CONTROL 0x0301005aU
+
struct __packed fw2x_msg_wol_pattern {
u8 mask[16];
u32 crc;
@@ -74,6 +82,7 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
static u32 aq_fw2x_mbox_get(struct aq_hw_s *self);
static u32 aq_fw2x_rpc_get(struct aq_hw_s *self);
+static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr);
static u32 aq_fw2x_state2_get(struct aq_hw_s *self);
static int aq_fw2x_init(struct aq_hw_s *self)
@@ -91,6 +100,8 @@ static int aq_fw2x_init(struct aq_hw_s *self)
self->rpc_addr != 0U,
1000U, 100000U);
+ err = aq_fw2x_settings_get(self, &self->settings_addr);
+
return err;
}
@@ -170,17 +181,26 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
return 0;
}
-static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state)
+static void aq_fw2x_upd_flow_control_bits(struct aq_hw_s *self,
+ u32 *mpi_state, u32 fc)
{
- if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
- *mpi_state |= BIT(CAPS_HI_PAUSE);
- else
- *mpi_state &= ~BIT(CAPS_HI_PAUSE);
+ *mpi_state &= ~(HW_ATL_FW2X_CTRL_PAUSE |
+ HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE);
- if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
- *mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE);
- else
- *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+ switch (fc) {
+ /* There is no explicit RX-only pause mode, so we fold it
+ * into full flow control.
+ * Full FC means Rx, Tx, or both.
+ */
+ case AQ_NIC_FC_FULL:
+ case AQ_NIC_FC_RX:
+ *mpi_state |= HW_ATL_FW2X_CTRL_PAUSE |
+ HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE;
+ break;
+ case AQ_NIC_FC_TX:
+ *mpi_state |= HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE;
+ break;
+ }
}
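For reference, decoding those two bits back into a flow-control mode is the mirror image of the switch above. A standalone model, where FC_RX/FC_TX stand in for the driver's AQ_NIC_FC_* flags:

#include <stdint.h>

#define FC_RX 0x1U
#define FC_TX 0x2U

static uint32_t decode_fc_bits(int pause, int asym_pause)
{
        uint32_t fc = 0;

        if (pause)
                fc |= FC_RX; /* PAUSE advertises receive pause */
        if (asym_pause)
                fc |= FC_TX; /* ASYMMETRIC_PAUSE advertises transmit pause */
        return fc;
}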
static void aq_fw2x_upd_eee_rate_bits(struct aq_hw_s *self, u32 *mpi_opts,
@@ -204,7 +224,8 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
case MPI_INIT:
mpi_state &= ~BIT(CAPS_HI_LINK_DROP);
aq_fw2x_upd_eee_rate_bits(self, &mpi_state, cfg->eee_speeds);
- aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+ aq_fw2x_upd_flow_control_bits(self, &mpi_state,
+ self->aq_nic_cfg->fc.req);
break;
case MPI_DEINIT:
mpi_state |= BIT(CAPS_HI_LINK_DROP);
@@ -215,15 +236,20 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
break;
}
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
+
return 0;
}
static int aq_fw2x_update_link_status(struct aq_hw_s *self)
{
- u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
- u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
- FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G);
struct aq_hw_link_status_s *link_status = &self->aq_link_status;
+ u32 mpi_state;
+ u32 speed;
+
+ mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
+ speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
+ FW2X_RATE_2G5 | FW2X_RATE_5G |
+ FW2X_RATE_10G);
if (speed) {
if (speed & FW2X_RATE_10G)
@@ -247,11 +273,11 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self)
static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
{
+ u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
+ u32 mac_addr[2] = { 0 };
int err = 0;
u32 h = 0U;
u32 l = 0U;
- u32 mac_addr[2] = { 0 };
- u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
if (efuse_addr != 0) {
err = hw_atl_utils_fw_downld_dwords(self,
@@ -285,15 +311,16 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
h >>= 8;
mac[0] = (u8)(0xFFU & h);
}
+
return err;
}
static int aq_fw2x_update_stats(struct aq_hw_s *self)
{
- int err = 0;
u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS);
u32 stats_val;
+ int err = 0;
/* Toggle statistics bit for FW to update */
mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS);
@@ -320,9 +347,9 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
int err = 0;
u32 val;
- phy_temp_offset = self->mbox_addr +
- offsetof(struct hw_atl_utils_mbox, info) +
- offsetof(struct hw_aq_info, phy_temperature);
+ phy_temp_offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
+ info.phy_temperature);
+
/* Toggle statistics bit for FW to 0x36C.18 (CTRL_TEMPERATURE) */
mpi_opts = mpi_opts ^ HW_ATL_FW2X_CTRL_TEMPERATURE;
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
@@ -345,87 +372,46 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
return 0;
}
-static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac)
+static int aq_fw2x_set_wol(struct aq_hw_s *self, u8 *mac)
{
struct hw_atl_utils_fw_rpc *rpc = NULL;
- struct offload_info *cfg = NULL;
- unsigned int rpc_size = 0U;
- u32 mpi_opts;
+ struct offload_info *info = NULL;
+ u32 wol_bits = 0;
+ u32 rpc_size;
int err = 0;
u32 val;
- rpc_size = sizeof(rpc->msg_id) + sizeof(*cfg);
-
- err = hw_atl_utils_fw_rpc_wait(self, &rpc);
- if (err < 0)
- goto err_exit;
-
- memset(rpc, 0, rpc_size);
- cfg = (struct offload_info *)(&rpc->msg_id + 1);
-
- memcpy(cfg->mac_addr, mac, ETH_ALEN);
- cfg->len = sizeof(*cfg);
-
- /* Clear bit 0x36C.23 and 0x36C.22 */
- mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
- mpi_opts &= ~HW_ATL_FW2X_CTRL_SLEEP_PROXY;
- mpi_opts &= ~HW_ATL_FW2X_CTRL_LINK_DROP;
-
- aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
-
- err = hw_atl_utils_fw_rpc_call(self, rpc_size);
- if (err < 0)
- goto err_exit;
-
- /* Set bit 0x36C.23 */
- mpi_opts |= HW_ATL_FW2X_CTRL_SLEEP_PROXY;
- aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
-
- err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
- self, val,
- val & HW_ATL_FW2X_CTRL_SLEEP_PROXY,
- 1U, 100000U);
-
-err_exit:
- return err;
-}
-
-static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac)
-{
- struct hw_atl_utils_fw_rpc *rpc = NULL;
- struct fw2x_msg_wol *msg = NULL;
- u32 mpi_opts;
- int err = 0;
- u32 val;
-
- err = hw_atl_utils_fw_rpc_wait(self, &rpc);
- if (err < 0)
- goto err_exit;
-
- msg = (struct fw2x_msg_wol *)rpc;
-
- memset(msg, 0, sizeof(*msg));
-
- msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL;
- msg->magic_packet_enabled = true;
- memcpy(msg->hw_addr, mac, ETH_ALEN);
+ if (self->aq_nic_cfg->wol & WAKE_PHY) {
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR,
+ HW_ATL_FW2X_CTRL_LINK_DROP);
+ readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val,
+ (val &
+ HW_ATL_FW2X_CTRL_LINK_DROP) != 0,
+ 1000, 100000);
+ wol_bits |= HW_ATL_FW2X_CTRL_WAKE_ON_LINK;
+ }
- mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
- mpi_opts &= ~(HW_ATL_FW2X_CTRL_SLEEP_PROXY | HW_ATL_FW2X_CTRL_WOL);
+ if (self->aq_nic_cfg->wol & WAKE_MAGIC) {
+ wol_bits |= HW_ATL_FW2X_CTRL_SLEEP_PROXY |
+ HW_ATL_FW2X_CTRL_WOL;
- aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ err = hw_atl_utils_fw_rpc_wait(self, &rpc);
+ if (err < 0)
+ goto err_exit;
- err = hw_atl_utils_fw_rpc_call(self, sizeof(*msg));
- if (err < 0)
- goto err_exit;
+ rpc_size = sizeof(*info) +
+ offsetof(struct hw_atl_utils_fw_rpc, fw2x_offloads);
+ memset(rpc, 0, rpc_size);
+ info = &rpc->fw2x_offloads;
+ memcpy(info->mac_addr, mac, ETH_ALEN);
+ info->len = sizeof(*info);
- /* Set bit 0x36C.24 */
- mpi_opts |= HW_ATL_FW2X_CTRL_WOL;
- aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ err = hw_atl_utils_fw_rpc_call(self, rpc_size);
+ if (err < 0)
+ goto err_exit;
+ }
- err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
- self, val, val & HW_ATL_FW2X_CTRL_WOL,
- 1U, 10000U);
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, wol_bits);
err_exit:
return err;
@@ -436,14 +422,9 @@ static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state,
{
int err = 0;
- if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) {
- err = aq_fw2x_set_sleep_proxy(self, mac);
- if (err < 0)
- goto err_exit;
- err = aq_fw2x_set_wol_params(self, mac);
- }
+ if (self->aq_nic_cfg->wol)
+ err = aq_fw2x_set_wol(self, mac);
-err_exit:
return err;
}
@@ -460,8 +441,7 @@ static int aq_fw2x_send_fw_request(struct aq_hw_s *self,
dword_cnt = size / sizeof(u32);
if (size % sizeof(u32))
dword_cnt++;
- err = hw_atl_utils_fw_upload_dwords(self, aq_fw2x_rpc_get(self),
- (void *)fw_req, dword_cnt);
+ err = hw_atl_write_fwcfg_dwords(self, (void *)fw_req, dword_cnt);
if (err < 0)
goto err_exit;
@@ -495,6 +475,16 @@ static void aq_fw3x_enable_ptp(struct aq_hw_s *self, int enable)
aq_hw_write_reg(self, HW_ATL_FW3X_EXT_CONTROL_ADDR, ptp_opts);
}
+static int aq_fw2x_led_control(struct aq_hw_s *self, u32 mode)
+{
+ if (self->fw_ver_actual < HW_ATL_FW_VER_LED)
+ return -EOPNOTSUPP;
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_LED_ADDR, mode);
+
+ return 0;
+}
+
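The HW_ATL_FW_VER_* constants appear to pack the firmware version as major/minor/build bytes in descending significance, so the single integer compare above suffices as a feature gate. A sketch (the packing is inferred from the constant values, not stated in the patch):

static bool fw_has_led_control(u32 fw_ver_actual)
{
        /* 0x03010026 reads as firmware 3.1, build 0x26 */
        return fw_ver_actual >= HW_ATL_FW_VER_LED;
}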
static int aq_fw2x_set_eee_rate(struct aq_hw_s *self, u32 speed)
{
u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
@@ -512,11 +502,12 @@ static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate,
u32 mpi_state;
u32 caps_hi;
int err = 0;
- u32 addr = self->mbox_addr + offsetof(struct hw_atl_utils_mbox, info) +
- offsetof(struct hw_aq_info, caps_hi);
+ u32 offset;
+
+ offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
+ info.caps_hi);
- err = hw_atl_utils_fw_downld_dwords(self, addr, &caps_hi,
- sizeof(caps_hi) / sizeof(u32));
+ err = hw_atl_utils_fw_downld_dwords(self, offset, &caps_hi, 1);
if (err)
return err;
@@ -544,7 +535,8 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
{
u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
- aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+ aq_fw2x_upd_flow_control_bits(self, &mpi_state,
+ self->aq_nic_cfg->fc.req);
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
@@ -554,17 +546,41 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
{
u32 mpi_state = aq_fw2x_state2_get(self);
+ *fcmode = 0;
if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
- if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
- *fcmode = AQ_NIC_FC_RX;
+ *fcmode |= AQ_NIC_FC_RX;
+
+ if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+ *fcmode |= AQ_NIC_FC_TX;
+
+ return 0;
+}
+
+static int aq_fw2x_set_phyloopback(struct aq_hw_s *self, u32 mode, bool enable)
+{
+ u32 mpi_opts;
+
+ switch (mode) {
+ case AQ_HW_LOOPBACK_PHYINT_SYS:
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ if (enable)
+ mpi_opts |= HW_ATL_FW2X_CTRL_INT_LOOPBACK;
else
- *fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX;
- else
- if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
- *fcmode = AQ_NIC_FC_TX;
+ mpi_opts &= ~HW_ATL_FW2X_CTRL_INT_LOOPBACK;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ break;
+ case AQ_HW_LOOPBACK_PHYEXT_SYS:
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ if (enable)
+ mpi_opts |= HW_ATL_FW2X_CTRL_EXT_LOOPBACK;
else
- *fcmode = 0;
+ mpi_opts &= ~HW_ATL_FW2X_CTRL_EXT_LOOPBACK;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
@@ -579,6 +595,19 @@ static u32 aq_fw2x_rpc_get(struct aq_hw_s *self)
return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR);
}
+static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr)
+{
+ int err = 0;
+ u32 offset;
+
+ offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
+ info.setting_address);
+
+ err = hw_atl_utils_fw_downld_dwords(self, offset, addr, 1);
+
+ return err;
+}
+
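The nested offsetof(..., info.setting_address) used above is equivalent to summing the per-level offsets. A compile-time sketch with stand-in types (the real structs live in hw_atl_utils.h):

#include <stddef.h>

struct inner { unsigned int pad; unsigned int setting_address; };
struct outer { unsigned int hdr; struct inner info; };

_Static_assert(offsetof(struct outer, info.setting_address) ==
               offsetof(struct outer, info) +
               offsetof(struct inner, setting_address),
               "nested offsetof sums per-level offsets");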
static u32 aq_fw2x_state2_get(struct aq_hw_s *self)
{
return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
@@ -602,4 +631,6 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.get_flow_control = aq_fw2x_get_flow_control,
.send_fw_request = aq_fw2x_send_fw_request,
.enable_ptp = aq_fw3x_enable_ptp,
+ .led_control = aq_fw2x_led_control,
+ .set_phyloopback = aq_fw2x_set_phyloopback,
};
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index 78e52d217e56..539166112993 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -20,9 +20,10 @@
static int emac_arc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct net_device *ndev;
struct arc_emac_priv *priv;
- int interface, err;
+ phy_interface_t interface;
+ struct net_device *ndev;
+ int err;
if (!dev->of_node)
return -ENODEV;
@@ -37,9 +38,13 @@ static int emac_arc_probe(struct platform_device *pdev)
priv->drv_name = DRV_NAME;
priv->drv_version = DRV_VERSION;
- interface = of_get_phy_mode(dev->of_node);
- if (interface < 0)
- interface = PHY_INTERFACE_MODE_MII;
+ err = of_get_phy_mode(dev->of_node, &interface);
+ if (err) {
+ if (err == -ENODEV)
+ interface = PHY_INTERFACE_MODE_MII;
+ else
+ goto out_netdev;
+ }
priv->clk = devm_clk_get(dev, "hclk");
if (IS_ERR(priv->clk)) {
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 664d664e0925..aae231c5224f 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -97,8 +97,9 @@ static int emac_rockchip_probe(struct platform_device *pdev)
struct net_device *ndev;
struct rockchip_priv_data *priv;
const struct of_device_id *match;
+ phy_interface_t interface;
u32 data;
- int err, interface;
+ int err;
if (!pdev->dev.of_node)
return -ENODEV;
@@ -114,7 +115,9 @@ static int emac_rockchip_probe(struct platform_device *pdev)
priv->emac.drv_version = DRV_VERSION;
priv->emac.set_mac_speed = emac_rockchip_set_mac_speed;
- interface = of_get_phy_mode(dev->of_node);
+ err = of_get_phy_mode(dev->of_node, &interface);
+ if (err)
+ goto out_netdev;
/* RK3036/RK3066/RK3188 SoCs only support RMII */
if (interface != PHY_INTERFACE_MODE_RMII) {
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 1b1a09095c0d..8f5021091eee 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1744,10 +1744,9 @@ static int ag71xx_probe(struct platform_device *pdev)
eth_random_addr(ndev->dev_addr);
}
- ag->phy_if_mode = of_get_phy_mode(np);
- if (ag->phy_if_mode < 0) {
+ err = of_get_phy_mode(np, &ag->phy_if_mode);
+ if (err) {
netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
- err = ag->phy_if_mode;
goto err_free;
}
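All of the of_get_phy_mode() conversions in this series follow the same shape: the function now returns an error code and fills a phy_interface_t out-parameter, and -ENODEV (property absent) is the only error some callers soften into a default. A condensed sketch of that pattern:

static int example_get_phy_mode(struct device_node *np,
                                phy_interface_t *mode)
{
        int err = of_get_phy_mode(np, mode);

        if (err == -ENODEV)
                *mode = PHY_INTERFACE_MODE_MII; /* property absent: default */
        else if (err)
                return err;                     /* real failure: propagate */
        return 0;
}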
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 37752d9514e7..30b455013bf3 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1371,8 +1371,8 @@ static int nb8800_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->base = base;
- priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if (priv->phy_mode < 0)
+ ret = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
+ if (ret)
priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
priv->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
index aacc3cce2cc0..40941fb6065b 100644
--- a/drivers/net/ethernet/aurora/nb8800.h
+++ b/drivers/net/ethernet/aurora/nb8800.h
@@ -287,7 +287,7 @@ struct nb8800_priv {
struct device_node *phy_node;
/* PHY connection type from DT */
- int phy_mode;
+ phy_interface_t phy_mode;
/* Current link status */
int speed;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index a977a459bd20..825af709708e 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2479,9 +2479,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->netdev = dev;
priv->pdev = pdev;
- priv->phy_interface = of_get_phy_mode(dn);
+ ret = of_get_phy_mode(dn, &priv->phy_interface);
/* Default to GMII interface mode */
- if ((int)priv->phy_interface < 0)
+ if (ret)
priv->phy_interface = PHY_INTERFACE_MODE_GMII;
/* In the case of a fixed PHY, the DT node associated
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index d10b421ed1f1..5e037a305b83 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1934,7 +1934,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
+ return netdev_pick_tx(dev, skb, NULL) %
+ (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
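A worked example for the corrected modulo above: with CoS-aware queueing the TX queue space is num_eth_queues * max_cos wide, so the pick must wrap over the full range (numbers illustrative):

static unsigned int select_eth_queue(unsigned int picked,
                                     unsigned int num_eth_queues,
                                     unsigned int max_cos)
{
        /* e.g. picked = 37 with 8 queues * 3 CoS wraps to queue 13 */
        return picked % (num_eth_queues * max_cos);
}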
void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 226ab29f4cb6..3f8435208bf4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -32,31 +32,31 @@
* IRO[142].m2) + ((sbId) * IRO[142].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[161].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[323].base + ((pfId) * IRO[323].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[324].base + ((pfId) * IRO[324].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[325].base + ((pfId) * IRO[325].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
- (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
+ (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
+ (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
+ (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
- (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
+ (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
- (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
+ (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
- (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
+ (IRO[322].base + ((pfId) * IRO[322].m1) + ((iscsiEqId) * IRO[322].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
- (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
+ (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[322].base + ((pfId) * IRO[322].m1))
+ (IRO[323].base + ((pfId) * IRO[323].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[314].base + ((pfId) * IRO[314].m1))
+ (IRO[315].base + ((pfId) * IRO[315].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[313].base + ((pfId) * IRO[313].m1))
+ (IRO[314].base + ((pfId) * IRO[314].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[312].base + ((pfId) * IRO[312].m1))
+ (IRO[313].base + ((pfId) * IRO[313].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[155].base + ((funcId) * IRO[155].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -99,81 +99,81 @@
#define TSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[107].base + ((funcId) * IRO[107].m1))
#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[278].base + ((pfId) * IRO[278].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
(IRO[279].base + ((pfId) * IRO[279].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
(IRO[280].base + ((pfId) * IRO[280].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
(IRO[281].base + ((pfId) * IRO[281].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+ (IRO[282].base + ((pfId) * IRO[282].m1))
#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[277].base + ((pfId) * IRO[277].m1))
+ (IRO[278].base + ((pfId) * IRO[278].m1))
#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[276].base + ((pfId) * IRO[276].m1))
+ (IRO[277].base + ((pfId) * IRO[277].m1))
#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[275].base + ((pfId) * IRO[275].m1))
+ (IRO[276].base + ((pfId) * IRO[276].m1))
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
- (IRO[274].base + ((pfId) * IRO[274].m1))
+ (IRO[275].base + ((pfId) * IRO[275].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
- (IRO[284].base + ((pfId) * IRO[284].m1))
+ (IRO[285].base + ((pfId) * IRO[285].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[270].base + ((pfId) * IRO[270].m1))
-#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[271].base + ((pfId) * IRO[271].m1))
-#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[272].base + ((pfId) * IRO[272].m1))
-#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[273].base + ((pfId) * IRO[273].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[274].base + ((pfId) * IRO[274].m1))
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
(IRO[206].base + ((pfId) * IRO[206].m1))
#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[109].base + ((funcId) * IRO[109].m1))
#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
- (IRO[223].base + ((pfId) * IRO[223].m1))
+ (IRO[224].base + ((pfId) * IRO[224].m1))
#define TSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[108].base + ((funcId) * IRO[108].m1))
-#define USTORM_AGG_DATA_OFFSET (IRO[212].base)
-#define USTORM_AGG_DATA_SIZE (IRO[212].size)
+#define USTORM_AGG_DATA_OFFSET (IRO[213].base)
+#define USTORM_AGG_DATA_SIZE (IRO[213].size)
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[180].base + ((assertListEntry) * IRO[180].m1))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[187].base + ((portId) * IRO[187].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
- (IRO[325].base + ((pfId) * IRO[325].m1))
+ (IRO[326].base + ((pfId) * IRO[326].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[182].base + ((funcId) * IRO[182].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[289].base + ((pfId) * IRO[289].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[290].base + ((pfId) * IRO[290].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[291].base + ((pfId) * IRO[291].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[294].base + ((pfId) * IRO[294].m1))
+ (IRO[295].base + ((pfId) * IRO[295].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
- (IRO[291].base + ((pfId) * IRO[291].m1))
+ (IRO[292].base + ((pfId) * IRO[292].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[287].base + ((pfId) * IRO[287].m1))
+ (IRO[288].base + ((pfId) * IRO[288].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[286].base + ((pfId) * IRO[286].m1))
+ (IRO[287].base + ((pfId) * IRO[287].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[285].base + ((pfId) * IRO[285].m1))
+ (IRO[286].base + ((pfId) * IRO[286].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[288].base + ((pfId) * IRO[288].m1))
+ (IRO[289].base + ((pfId) * IRO[289].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
- (IRO[292].base + ((pfId) * IRO[292].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[293].base + ((pfId) * IRO[293].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[294].base + ((pfId) * IRO[294].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[186].base + ((pfId) * IRO[186].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[184].base + ((funcId) * IRO[184].m1))
#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
- (IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \
- IRO[215].m2))
+ (IRO[216].base + ((portId) * IRO[216].m1) + ((clientId) * \
+ IRO[216].m2))
#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
- (IRO[216].base + ((qzoneId) * IRO[216].m1))
-#define USTORM_TPA_BTR_OFFSET (IRO[213].base)
-#define USTORM_TPA_BTR_SIZE (IRO[213].size)
+ (IRO[217].base + ((qzoneId) * IRO[217].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[214].base)
+#define USTORM_TPA_BTR_SIZE (IRO[214].size)
#define USTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[183].base + ((funcId) * IRO[183].m1))
#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
@@ -188,39 +188,39 @@
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[302].base + ((pfId) * IRO[302].m1))
+ (IRO[303].base + ((pfId) * IRO[303].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
- (IRO[305].base + ((pfId) * IRO[305].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[306].base + ((pfId) * IRO[306].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[307].base + ((pfId) * IRO[307].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[308].base + ((pfId) * IRO[308].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[309].base + ((pfId) * IRO[309].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[310].base + ((pfId) * IRO[310].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[311].base + ((pfId) * IRO[311].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[312].base + ((pfId) * IRO[312].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[301].base + ((pfId) * IRO[301].m1))
+ (IRO[302].base + ((pfId) * IRO[302].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[300].base + ((pfId) * IRO[300].m1))
+ (IRO[301].base + ((pfId) * IRO[301].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[299].base + ((pfId) * IRO[299].m1))
+ (IRO[300].base + ((pfId) * IRO[300].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[304].base + ((pfId) * IRO[304].m1))
+ (IRO[305].base + ((pfId) * IRO[305].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
- (IRO[303].base + ((pfId) * IRO[303].m1))
+ (IRO[304].base + ((pfId) * IRO[304].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
- (IRO[298].base + ((pfId) * IRO[298].m1))
+ (IRO[299].base + ((pfId) * IRO[299].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[297].base + ((pfId) * IRO[297].m1))
+ (IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
- (IRO[296].base + ((pfId) * IRO[296].m1))
+ (IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
- (IRO[295].base + ((pfId) * IRO[295].m1))
+ (IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -233,12 +233,12 @@
#define XSTORM_SPQ_PROD_OFFSET(funcId) \
(IRO[31].base + ((funcId) * IRO[31].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
- (IRO[217].base + ((portId) * IRO[217].m1))
-#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
(IRO[218].base + ((portId) * IRO[218].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
+ (IRO[219].base + ((portId) * IRO[219].m1))
#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
- (IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \
- IRO[220].m2))
+ (IRO[221].base + (((pfId)>>1) * IRO[221].m1) + (((pfId)&1) * \
+ IRO[221].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[48].base + ((funcId) * IRO[48].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
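The renumbered hunks above shift every iSCSI/TPA IRO index up by one to track the relocated tables in the 7.13.x firmware; the base + index * stride arithmetic itself is unchanged. A minimal sketch of how such a macro resolves, assuming an illustrative IRO record layout (the real driver's struct and array names may differ):

/* Illustrative IRO record; layout assumed for this sketch only. */
struct iro_entry {
	unsigned int   base;	/* base offset inside the STORM RAM */
	unsigned short m1;	/* stride for the first index (e.g. pfId) */
	unsigned short m2;	/* stride for an optional second index */
	unsigned short size;	/* region size, used by the *_SIZE macros */
};

/* Mirrors e.g. USTORM_ISCSI_RQ_SIZE_OFFSET(pfId), which after the
 * renumbering expands to IRO[294].base + pfId * IRO[294].m1. */
static unsigned int storm_offset(const struct iro_entry *iro, int idx,
				 unsigned int pf_id)
{
	return iro[idx].base + pf_id * iro[idx].m1;
}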
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 78326a6c0aba..622fadc50316 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -3024,7 +3024,7 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 13
-#define BCM_5710_FW_REVISION_VERSION 11
+#define BCM_5710_FW_REVISION_VERSION 15
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d581d0ae6584..9638d65d8261 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -5611,9 +5611,9 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
return 0;
}
-static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
@@ -5685,7 +5685,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
return rc;
}
-static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
+static u8 bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
@@ -7364,9 +7364,9 @@ static void bnx2x_8073_specific_func(struct bnx2x_phy *phy,
}
}
-static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8073_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 val = 0, tmp1;
@@ -7427,7 +7427,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
if (params->loopback_mode == LOOPBACK_EXT) {
bnx2x_807x_force_10G(bp, phy);
DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
- return 0;
+ return;
} else {
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
@@ -7509,7 +7509,6 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
- return 0;
}
static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
@@ -7676,9 +7675,9 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
/******************************************************************/
/* BCM8705 PHY SECTION */
/******************************************************************/
-static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8705_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "init 8705\n");
@@ -7700,7 +7699,6 @@ static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
/* BCM8705 doesn't have microcode, hence the 0 */
bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
- return 0;
}
static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
@@ -8887,9 +8885,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
/******************************************************************/
/* BCM8706 PHY SECTION */
/******************************************************************/
-static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8706_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
u32 tx_en_mode;
u16 cnt, val, tmp1;
@@ -8989,13 +8987,11 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
}
-
- return 0;
}
-static int bnx2x_8706_read_status(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
return bnx2x_8706_8726_read_status(phy, params, vars);
}
@@ -9070,9 +9066,9 @@ static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
}
-static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8726_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
@@ -9150,9 +9146,6 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
MDIO_PMA_REG_8726_TX_CTRL2,
phy->tx_preemphasis[1]);
}
-
- return 0;
-
}
static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
@@ -9288,9 +9281,9 @@ static void bnx2x_8727_config_speed(struct bnx2x_phy *phy,
}
}
-static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8727_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
u32 tx_en_mode;
u16 tmp1, mod_abs, tmp2;
@@ -9370,8 +9363,6 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
(tmp2 & 0x7fff));
}
-
- return 0;
}
static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
@@ -9946,9 +9937,9 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
return 0;
}
-static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8481_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
/* Restore normal power mode*/
@@ -9960,7 +9951,7 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
- return bnx2x_848xx_cmn_config_init(phy, params, vars);
+ bnx2x_848xx_cmn_config_init(phy, params, vars);
}
#define PHY848xx_CMDHDLR_WAIT 300
@@ -10210,8 +10201,8 @@ static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
return reset_gpios;
}
-static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
- struct link_params *params)
+static void bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u8 reset_gpios;
@@ -10239,8 +10230,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
udelay(10);
DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n",
reset_gpios);
-
- return 0;
}
static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
@@ -10283,9 +10272,9 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
}
#define PHY84833_CONSTANT_LATENCY 1193
-static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port, initialize = 1;
@@ -10430,7 +10419,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
if (rc) {
DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
bnx2x_8483x_disable_eee(phy, params, vars);
- return rc;
+ return;
}
if ((phy->req_duplex == DUPLEX_FULL) &&
@@ -10442,7 +10431,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
rc = bnx2x_8483x_disable_eee(phy, params, vars);
if (rc) {
DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n");
- return rc;
+ return;
}
} else {
vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
@@ -10481,7 +10470,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
MDIO_84833_TOP_CFG_XGPHY_STRAP1,
(u16)~MDIO_84833_SUPER_ISOLATE);
}
- return rc;
}
static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
@@ -11038,9 +11026,9 @@ static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy,
}
}
-static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_54618se_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port;
@@ -11240,8 +11228,6 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
bnx2x_cl22_write(bp, phy,
MDIO_PMA_REG_CTRL, autoneg_val);
-
- return 0;
}
@@ -11465,9 +11451,9 @@ static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
}
-static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_7101_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
u16 fw_ver1, fw_ver2, val;
struct bnx2x *bp = params->bp;
@@ -11502,7 +11488,6 @@ static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
bnx2x_save_spirom_version(bp, params->port,
(u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
- return 0;
}
static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
@@ -11636,14 +11621,14 @@ static const struct bnx2x_phy phy_null = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)NULL,
- .read_status = (read_status_t)NULL,
- .link_reset = (link_reset_t)NULL,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = NULL,
+ .read_status = NULL,
+ .link_reset = NULL,
+ .config_loopback = NULL,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_serdes = {
@@ -11671,14 +11656,14 @@ static const struct bnx2x_phy phy_serdes = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_xgxs_config_init,
- .read_status = (read_status_t)bnx2x_link_settings_status,
- .link_reset = (link_reset_t)bnx2x_int_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_xgxs_config_init,
+ .read_status = bnx2x_link_settings_status,
+ .link_reset = bnx2x_int_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_xgxs = {
@@ -11707,14 +11692,14 @@ static const struct bnx2x_phy phy_xgxs = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_xgxs_config_init,
- .read_status = (read_status_t)bnx2x_link_settings_status,
- .link_reset = (link_reset_t)bnx2x_int_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func
+ .config_init = bnx2x_xgxs_config_init,
+ .read_status = bnx2x_link_settings_status,
+ .link_reset = bnx2x_int_link_reset,
+ .config_loopback = bnx2x_set_xgxs_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = bnx2x_xgxs_specific_func
};
static const struct bnx2x_phy phy_warpcore = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
@@ -11745,14 +11730,14 @@ static const struct bnx2x_phy phy_warpcore = {
.speed_cap_mask = 0,
/* req_duplex = */0,
/* rsrv = */0,
- .config_init = (config_init_t)bnx2x_warpcore_config_init,
- .read_status = (read_status_t)bnx2x_warpcore_read_status,
- .link_reset = (link_reset_t)bnx2x_warpcore_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_warpcore_config_init,
+ .read_status = bnx2x_warpcore_read_status,
+ .link_reset = bnx2x_warpcore_link_reset,
+ .config_loopback = bnx2x_set_warpcore_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = bnx2x_warpcore_hw_reset,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
@@ -11776,14 +11761,14 @@ static const struct bnx2x_phy phy_7101 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_7101_config_init,
- .read_status = (read_status_t)bnx2x_7101_read_status,
- .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
- .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset,
- .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_7101_config_init,
+ .read_status = bnx2x_7101_read_status,
+ .link_reset = bnx2x_common_ext_link_reset,
+ .config_loopback = bnx2x_7101_config_loopback,
+ .format_fw_ver = bnx2x_7101_format_ver,
+ .hw_reset = bnx2x_7101_hw_reset,
+ .set_link_led = bnx2x_7101_set_link_led,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_8073 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
@@ -11807,14 +11792,14 @@ static const struct bnx2x_phy phy_8073 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8073_config_init,
- .read_status = (read_status_t)bnx2x_8073_read_status,
- .link_reset = (link_reset_t)bnx2x_8073_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
+ .config_init = bnx2x_8073_config_init,
+ .read_status = bnx2x_8073_read_status,
+ .link_reset = bnx2x_8073_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = bnx2x_8073_specific_func
};
static const struct bnx2x_phy phy_8705 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
@@ -11835,14 +11820,14 @@ static const struct bnx2x_phy phy_8705 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8705_config_init,
- .read_status = (read_status_t)bnx2x_8705_read_status,
- .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_8705_config_init,
+ .read_status = bnx2x_8705_read_status,
+ .link_reset = bnx2x_common_ext_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_null_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_8706 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
@@ -11864,14 +11849,14 @@ static const struct bnx2x_phy phy_8706 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8706_config_init,
- .read_status = (read_status_t)bnx2x_8706_read_status,
- .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_8706_config_init,
+ .read_status = bnx2x_8706_read_status,
+ .link_reset = bnx2x_common_ext_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_8726 = {
@@ -11896,14 +11881,14 @@ static const struct bnx2x_phy phy_8726 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8726_config_init,
- .read_status = (read_status_t)bnx2x_8726_read_status,
- .link_reset = (link_reset_t)bnx2x_8726_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
- .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_8726_config_init,
+ .read_status = bnx2x_8726_read_status,
+ .link_reset = bnx2x_8726_link_reset,
+ .config_loopback = bnx2x_8726_config_loopback,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_8727 = {
@@ -11927,14 +11912,14 @@ static const struct bnx2x_phy phy_8727 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8727_config_init,
- .read_status = (read_status_t)bnx2x_8727_read_status,
- .link_reset = (link_reset_t)bnx2x_8727_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset,
- .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
+ .config_init = bnx2x_8727_config_init,
+ .read_status = bnx2x_8727_read_status,
+ .link_reset = bnx2x_8727_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = bnx2x_8727_hw_reset,
+ .set_link_led = bnx2x_8727_set_link_led,
+ .phy_specific_func = bnx2x_8727_specific_func
};
static const struct bnx2x_phy phy_8481 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
@@ -11962,14 +11947,14 @@ static const struct bnx2x_phy phy_8481 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8481_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_8481_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_8481_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_8481_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = bnx2x_8481_hw_reset,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_84823 = {
@@ -11999,14 +11984,14 @@ static const struct bnx2x_phy phy_84823 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_848x3_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
};
static const struct bnx2x_phy phy_84833 = {
@@ -12034,14 +12019,14 @@ static const struct bnx2x_phy phy_84833 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_848x3_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = bnx2x_84833_hw_reset_phy,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
};
static const struct bnx2x_phy phy_84834 = {
@@ -12068,14 +12053,14 @@ static const struct bnx2x_phy phy_84834 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_848x3_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = bnx2x_84833_hw_reset_phy,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
};
static const struct bnx2x_phy phy_84858 = {
@@ -12102,14 +12087,14 @@ static const struct bnx2x_phy phy_84858 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_848x3_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_8485x_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_8485x_format_ver,
+ .hw_reset = bnx2x_84833_hw_reset_phy,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
};
static const struct bnx2x_phy phy_54618se = {
@@ -12136,14 +12121,14 @@ static const struct bnx2x_phy phy_54618se = {
.speed_cap_mask = 0,
/* req_duplex = */0,
/* rsrv = */0,
- .config_init = (config_init_t)bnx2x_54618se_config_init,
- .read_status = (read_status_t)bnx2x_54618se_read_status,
- .link_reset = (link_reset_t)bnx2x_54618se_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func
+ .config_init = bnx2x_54618se_config_init,
+ .read_status = bnx2x_54618se_read_status,
+ .link_reset = bnx2x_54618se_link_reset,
+ .config_loopback = bnx2x_54618se_config_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = bnx2x_5461x_set_link_led,
+ .phy_specific_func = bnx2x_54618se_specific_func
};
/*****************************************************************/
/* */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 7115f5025664..cae03c89dc73 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -127,15 +127,15 @@ struct link_vars;
struct link_params;
struct bnx2x_phy;
-typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
- struct link_vars *vars);
+typedef void (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
+ struct link_vars *vars);
typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
struct link_vars *vars);
typedef void (*link_reset_t)(struct bnx2x_phy *phy,
struct link_params *params);
typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
struct link_params *params);
-typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
+typedef int (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
struct link_params *params, u8 mode);
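The bnx2x_link.c hunks above exist to make these typedefs honest: each config_init/read_status implementation now carries the typedef's exact signature, so the phy tables can drop their function-pointer casts. Calling through a cast, mismatched pointer type is undefined behavior and trips Control Flow Integrity checking. A hedged sketch of the pattern (names here are illustrative, not the driver's):

typedef void (*init_fn_t)(int port);

static void good_init(int port)		/* matches init_fn_t exactly */
{
	(void)port;	/* ... program the port ... */
}

/* Before the cleanup the tables held the moral equivalent of
 *	.init = (init_fn_t)init_returning_int,
 * which compiles but makes every indirect call undefined behavior.
 * With matching signatures, no cast is needed: */
static const struct { init_fn_t init; } ops = {
	.init = good_init,
};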
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 0edbb0a76847..5097a44686b3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2397,15 +2397,21 @@ static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
/* send the ramrod on all the queues of the PF */
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ int tx_idx;
/* Set the appropriate Queue object */
q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
- /* Update the Queue state */
- rc = bnx2x_queue_state_change(bp, &q_params);
- if (rc) {
- BNX2X_ERR("Failed to configure Tx switching\n");
- return rc;
+ for (tx_idx = FIRST_TX_COS_INDEX;
+ tx_idx < fp->max_cos; tx_idx++) {
+ q_params.params.update.cid_index = tx_idx;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure Tx switching\n");
+ return rc;
+ }
}
}
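The Tx-switching fix above moves the queue-update ramrod inside a per-CoS loop, setting cid_index for each traffic-class ring instead of updating only the first one. A minimal sketch of the shape of that fix (queue_update() is a stub standing in for bnx2x_queue_state_change()):

#define MAX_COS 3

/* Stub standing in for the real state-change call. */
static int queue_update(int queue_id, int cos_idx)
{
	(void)queue_id; (void)cos_idx;
	return 0;
}

static int update_all_cos(int queue_id)
{
	for (int cos_idx = 0; cos_idx < MAX_COS; cos_idx++) {
		int rc = queue_update(queue_id, cos_idx); /* one ramrod per CoS ring */
		if (rc)
			return rc;	/* stop on the first failure, as the driver does */
	}
	return 0;
}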
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9da4fbee3cf7..35bc579d594a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1767,8 +1767,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
- netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
- bnxt_sched_reset(bp, rxr);
+ bnapi->cp_ring.rx_buf_errors++;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
+ netdev_warn(bp->dev, "RX buffer error %x\n",
+ rx_err);
+ bnxt_sched_reset(bp, rxr);
+ }
}
goto next_rx_no_len;
}
@@ -4269,6 +4273,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
/* Wait until hwrm response cmpl interrupt is processed */
while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
i++ < tmo_count) {
+ /* Abort the wait for completion if the FW health
+ * check has failed.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return -EBUSY;
/* on first few passes, just barely sleep */
if (i < HWRM_SHORT_TIMEOUT_COUNTER)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
@@ -4292,6 +4301,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
/* Check if response len is updated */
for (i = 0; i < tmo_count; i++) {
+ /* Abort the wait for completion if the FW health
+ * check has failed.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return -EBUSY;
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
if (len)
@@ -4432,7 +4446,8 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
- flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT;
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
+ FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
req.flags = cpu_to_le32(flags);
req.ver_maj_8b = DRV_VER_MAJ;
req.ver_min_8b = DRV_VER_MIN;
@@ -4596,21 +4611,21 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct flow_keys *keys = &fltr->fkeys;
struct bnxt_vnic_info *vnic;
- u32 dst_ena = 0;
+ u32 flags = 0;
int rc = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
- if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) {
- dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
- req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);
- vnic = &bp->vnic_info[0];
+ if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
+ flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
+ req.dst_id = cpu_to_le16(fltr->rxq);
} else {
vnic = &bp->vnic_info[fltr->rxq + 1];
+ req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
}
- req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
- req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena);
+ req.flags = cpu_to_le32(flags);
+ req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
req.ethertype = htons(ETH_P_IP);
memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
@@ -6938,6 +6953,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
+ bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
@@ -7037,8 +7054,8 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
flags = le32_to_cpu(resp->flags);
if (flags &
- CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED)
- bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX;
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
hwrm_cfa_adv_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -9688,7 +9705,7 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)
static bool bnxt_rfs_supported(struct bnxt *bp)
{
if (bp->flags & BNXT_FLAG_CHIP_P5) {
- if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX)
+ if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
return true;
return false;
}
@@ -10110,6 +10127,7 @@ static void bnxt_force_fw_reset(struct bnxt *bp)
void bnxt_fw_exception(struct bnxt *bp)
{
+ netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
bnxt_rtnl_lock_sp(bp);
bnxt_force_fw_reset(bp);
@@ -10738,6 +10756,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_ulp_start(bp, rc);
+ bnxt_dl_health_status_update(bp, true);
rtnl_unlock();
break;
}
@@ -10745,6 +10764,8 @@ static void bnxt_fw_reset_task(struct work_struct *work)
fw_reset_abort:
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
+ bnxt_dl_health_status_update(bp, false);
bp->fw_reset_state = 0;
rtnl_lock();
dev_close(bp->dev);
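Two of the bnxt.c hunks above add an early abort to the HWRM wait loops: once the health-check path has latched BNXT_STATE_FW_FATAL_COND, there is no point sleeping out the full command timeout. A hedged userspace sketch of that pattern (fw_fatal and cmd_done stand in for the driver's state bit and response check):

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool fw_fatal;	/* latched by the health-check path */

static int wait_for_cmd(bool (*cmd_done)(void), int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		if (atomic_load(&fw_fatal))
			return -EBUSY;	/* firmware is gone; fail fast */
		if (cmd_done())
			return 0;
		/* usleep_range()-style backoff elided */
	}
	return -ETIMEDOUT;
}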
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a3545c846bfb..37549cac3de6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,11 +12,11 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.10.0"
+#define DRV_MODULE_VERSION "1.10.1"
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
-#define DRV_VER_UPD 0
+#define DRV_VER_UPD 1
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
@@ -932,6 +932,7 @@ struct bnxt_cp_ring_info {
dma_addr_t hw_stats_map;
u32 hw_stats_ctx_id;
u64 rx_l4_csum_errors;
+ u64 rx_buf_errors;
u64 missed_irqs;
struct bnxt_ring_struct cp_ring_struct;
@@ -1383,6 +1384,7 @@ struct bnxt_fw_health {
u32 last_fw_reset_cnt;
u8 enabled:1;
u8 master:1;
+ u8 fatal:1;
u8 tmr_multiplier;
u8 tmr_counter;
u8 fw_reset_seq_cnt;
@@ -1666,10 +1668,11 @@ struct bnxt {
#define BNXT_FW_CAP_ERROR_RECOVERY 0x00002000
#define BNXT_FW_CAP_PKG_VER 0x00004000
#define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000
- #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 0x00010000
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
+ #define BNXT_FW_CAP_HOT_RESET 0x00200000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index ae4ddf33fe5c..707827176231 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -91,6 +91,7 @@ static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
if (!priv_ctx)
return -EOPNOTSUPP;
+ bp->fw_health->fatal = true;
event = fw_reporter_ctx->sp_event;
if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
bnxt_fw_reset(bp);
@@ -199,6 +200,26 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
}
}
+void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy)
+{
+ struct bnxt_fw_health *health = bp->fw_health;
+ u8 state;
+
+ if (healthy)
+ state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
+ else
+ state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
+
+ if (health->fatal)
+ devlink_health_reporter_state_update(health->fw_fatal_reporter,
+ state);
+ else
+ devlink_health_reporter_state_update(health->fw_reset_reporter,
+ state);
+
+ health->fatal = false;
+}
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
@@ -314,10 +335,17 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
} else {
rc = hwrm_send_message_silent(bp, msg, msg_len,
HWRM_CMD_TIMEOUT);
- if (!rc)
+ if (!rc) {
bnxt_copy_from_nvm_data(val, data,
nvm_param.nvm_num_bits,
nvm_param.dl_num_bytes);
+ } else {
+ struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (resp->cmd_err ==
+ NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
+ rc = -EOPNOTSUPP;
+ }
}
dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
if (rc == -EACCES)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 2f4fd0a7d04b..665d4bdcd8c0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -57,6 +57,7 @@ struct bnxt_dl_nvm_param {
};
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
+void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
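bnxt_dl_health_status_update() above routes the outcome of a recovery to whichever devlink reporter (fatal or reset) owned the event, via devlink_health_reporter_state_update(). On the caller side, the bnxt.c hunks invoke it once recovery completes or aborts; a sketch of that call site, hedged against the driver's actual rc handling:

/* recovered is an illustrative stand-in for the driver's rc check. */
static void finish_recovery(struct bnxt *bp, int rc)
{
	bool recovered = (rc == 0);

	/* true  -> DEVLINK_HEALTH_REPORTER_STATE_HEALTHY
	 * false -> DEVLINK_HEALTH_REPORTER_STATE_ERROR */
	bnxt_dl_health_status_update(bp, recovered);
}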
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index f2220b826d61..0641020b56d5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -173,6 +173,7 @@ static const char * const bnxt_ring_tpa2_stats_str[] = {
static const char * const bnxt_ring_sw_stats_str[] = {
"rx_l4_csum_errors",
+ "rx_buf_errors",
"missed_irqs",
};
@@ -552,6 +553,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (k = 0; k < stat_fields; j++, k++)
buf[j] = le64_to_cpu(hw_stats[k]);
buf[j++] = cpr->rx_l4_csum_errors;
+ buf[j++] = cpr->rx_buf_errors;
buf[j++] = cpr->missed_irqs;
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
@@ -1785,6 +1787,8 @@ static int bnxt_firmware_reset(struct net_device *dev,
case BNXT_FW_RESET_CHIP:
req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
+ if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+ req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
break;
case BNXT_FW_RESET_AP:
req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
@@ -2981,7 +2985,8 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return -EOPNOTSUPP;
}
- if (pci_vfs_assigned(bp->pdev)) {
+ if (pci_vfs_assigned(bp->pdev) &&
+ !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
netdev_err(dev,
"Reset not allowed when VFs are assigned to VMs\n");
return -EBUSY;
@@ -2994,7 +2999,9 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
if (!rc) {
- netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
+ netdev_info(dev, "Reset request successful.\n");
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
+ netdev_info(dev, "Reload driver to complete reset\n");
*flags = 0;
}
} else if (*flags == ETH_RESET_AP) {
@@ -3038,7 +3045,8 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
mutex_lock(&bp->hwrm_cmd_lock);
while (1) {
*seq_ptr = cpu_to_le16(seq);
- rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ rc = _hwrm_send_message(bp, msg, msg_len,
+ HWRM_COREDUMP_TIMEOUT);
if (rc)
break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 03b197eb793b..7cf27dffadb5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -176,6 +176,9 @@ struct cmd_nums {
#define HWRM_RESERVED6 0x65UL
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
#define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
+ #define HWRM_QUEUE_MPLS_QCAPS 0x80UL
+ #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
+ #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
#define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
#define HWRM_CFA_L2_FILTER_FREE 0x91UL
#define HWRM_CFA_L2_FILTER_CFG 0x92UL
@@ -208,7 +211,7 @@ struct cmd_nums {
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
#define HWRM_FW_SYNC 0xc3UL
- #define HWRM_FW_STATE_BUFFER_QCAPS 0xc4UL
+ #define HWRM_FW_STATE_QCAPS 0xc4UL
#define HWRM_FW_STATE_QUIESCE 0xc5UL
#define HWRM_FW_STATE_BACKUP 0xc6UL
#define HWRM_FW_STATE_RESTORE 0xc7UL
@@ -225,8 +228,11 @@ struct cmd_nums {
#define HWRM_PORT_PRBS_TEST 0xd5UL
#define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL
#define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
+ #define HWRM_FW_STATE_UNQUIESCE 0xd8UL
+ #define HWRM_PORT_DSC_DUMP 0xd9UL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
+ #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
#define HWRM_WOL_FILTER_QCFG 0xf2UL
@@ -308,6 +314,7 @@ struct cmd_nums {
#define HWRM_ENGINE_STATS_CONFIG 0x155UL
#define HWRM_ENGINE_STATS_CLEAR 0x156UL
#define HWRM_ENGINE_STATS_QUERY 0x157UL
+ #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL
#define HWRM_ENGINE_RQ_ALLOC 0x15eUL
#define HWRM_ENGINE_RQ_FREE 0x15fUL
#define HWRM_ENGINE_CQ_ALLOC 0x160UL
@@ -390,6 +397,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
#define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_BUSY 0x10UL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -420,9 +428,9 @@ struct hwrm_err_output {
#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 100
-#define HWRM_VERSION_STR "1.10.0.100"
+#define HWRM_VERSION_UPDATE 1
+#define HWRM_VERSION_RSVD 12
+#define HWRM_VERSION_STR "1.10.1.12"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -637,6 +645,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
#define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
#define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1115,6 +1125,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
#define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
#define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1255,7 +1266,8 @@ struct hwrm_func_qcfg_output {
u8 unused_1;
u8 always_1;
__le32 reset_addr_poll;
- u8 unused_2[3];
+ __le16 legacy_l2_db_size_kb;
+ u8 unused_2[1];
u8 valid;
};
@@ -1500,6 +1512,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
#define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
#define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1762,7 +1775,7 @@ struct hwrm_func_backing_store_qcaps_input {
__le64 resp_addr;
};
-/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */
+/* hwrm_func_backing_store_qcaps_output (size:640b/80B) */
struct hwrm_func_backing_store_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1792,6 +1805,10 @@ struct hwrm_func_backing_store_qcaps_output {
__le32 tim_max_entries;
__le16 mrav_num_entries_units;
u8 tqm_entries_multiple;
+ u8 ctx_kind_initializer;
+ __le32 rsvd;
+ __le16 rsvd1;
+ u8 rsvd2;
u8 valid;
};
@@ -2524,6 +2541,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
__le32 preemphasis;
@@ -2761,8 +2779,8 @@ struct hwrm_port_mac_ptp_qcfg_output {
__le16 resp_len;
u8 flags;
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
u8 unused_0[3];
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
@@ -3177,10 +3195,12 @@ struct hwrm_port_phy_qcaps_output {
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfcUL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 2
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xf0UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 4
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -4980,6 +5000,15 @@ struct hwrm_vnic_rss_cfg_output {
u8 valid;
};
+/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_rss_cfg_cmd_err {
+ u8 code;
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY
+ u8 unused_0[7];
+};
+
/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
struct hwrm_vnic_plcmodes_cfg_input {
__le16 req_type;
@@ -5807,7 +5836,7 @@ struct hwrm_cfa_encap_record_free_output {
u8 valid;
};
-/* hwrm_cfa_ntuple_filter_alloc_input (size:1088b/136B) */
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
struct hwrm_cfa_ntuple_filter_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5815,10 +5844,12 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
__le32 enables;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
@@ -5887,8 +5918,6 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
__be16 dst_port;
__be16 dst_port_mask;
__le64 ntuple_filter_id_hint;
- __le16 rfs_ring_tbl_idx;
- u8 unused_0[6];
};
/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
@@ -5954,7 +5983,8 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
__le32 flags;
- #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
__le64 ntuple_filter_id;
__le32 new_dst_id;
__le32 new_mirror_vnic_id;
@@ -6534,18 +6564,21 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
u8 unused_0[3];
u8 valid;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 174412a55e53..0cc6ec51f45f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -166,8 +166,8 @@ bnxt_fill_l2_rewrite_fields(struct bnxt_tc_actions *actions,
actions->l2_rewrite_dmac[j] = cpu_to_be16(*(p + j));
}
- if (!is_wildcard(&eth_addr_mask[ETH_ALEN], ETH_ALEN)) {
- if (!is_exactmatch(&eth_addr_mask[ETH_ALEN], ETH_ALEN))
+ if (!is_wildcard(&eth_addr_mask[ETH_ALEN / 2], ETH_ALEN)) {
+ if (!is_exactmatch(&eth_addr_mask[ETH_ALEN / 2], ETH_ALEN))
return -EINVAL;
/* FW expects smac to be in u16 array format */
p = &eth_addr[ETH_ALEN / 2];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
index 286754903543..10c62b094914 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -64,9 +64,9 @@ struct bnxt_tc_tunnel_key {
#define bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask) \
((is_wildcard(&(eth_addr)[0], ETH_ALEN) && \
- is_wildcard(&(eth_addr)[ETH_ALEN], ETH_ALEN)) || \
+ is_wildcard(&(eth_addr)[ETH_ALEN / 2], ETH_ALEN)) || \
(is_wildcard(&(eth_addr_mask)[0], ETH_ALEN) && \
- is_wildcard(&(eth_addr_mask)[ETH_ALEN], ETH_ALEN)))
+ is_wildcard(&(eth_addr_mask)[ETH_ALEN / 2], ETH_ALEN)))
struct bnxt_tc_actions {
u32 flags;
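The ETH_ALEN fix above follows from the firmware's MAC layout: dmac and smac are handed over as arrays of 16-bit words, packed back to back, so in a u16 view smac begins at element ETH_ALEN / 2 (== 3), not ETH_ALEN. A self-contained sketch of that indexing:

#include <stdint.h>

#define ETH_ALEN 6

/* dmac occupies words 0..2, smac words 3..5 of the combined view. */
static const uint16_t *smac_words(const uint16_t *eth_addr)
{
	return &eth_addr[ETH_ALEN / 2];	/* &eth_addr[ETH_ALEN] would overshoot */
}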
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 155599dcee76..61ab7d21f6bd 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5208,6 +5208,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
cnic_init_bnx2x_tx_ring(dev, data);
cnic_init_bnx2x_rx_ring(dev, data);
+ data->general.fp_hsi_ver = ETH_FP_HSI_VERSION;
+
l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 4f689fb7a61c..120fa05a39ff 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1996,8 +1996,6 @@ static void reset_umac(struct bcmgenet_priv *priv)
/* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD);
- udelay(2);
- bcmgenet_umac_writel(priv, 0, UMAC_CMD);
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -2578,7 +2576,8 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
}
/* Init rDma */
- bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+ bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
+ DMA_SCB_BURST_SIZE);
/* Initialize Rx queues */
ret = bcmgenet_init_rx_queues(priv->dev);
@@ -2591,7 +2590,8 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
}
/* Init tDma */
- bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+ bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
+ DMA_SCB_BURST_SIZE);
/* Initialize Tx queues */
bcmgenet_init_tx_queues(priv->dev);
@@ -2614,8 +2614,10 @@ static void bcmgenet_irq_task(struct work_struct *work)
spin_unlock_irq(&priv->lock);
if (status & UMAC_IRQ_PHY_DET_R &&
- priv->dev->phydev->autoneg != AUTONEG_ENABLE)
+ priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
phy_init_hw(priv->dev->phydev);
+ genphy_config_aneg(priv->dev->phydev);
+ }
/* Link UP/DOWN event */
if (status & UMAC_IRQ_LINK_EVENT)
@@ -2879,12 +2881,6 @@ static int bcmgenet_open(struct net_device *dev)
if (priv->internal_phy)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
- ret = bcmgenet_mii_connect(dev);
- if (ret) {
- netdev_err(dev, "failed to connect to PHY\n");
- goto err_clk_disable;
- }
-
/* take MAC out of reset */
bcmgenet_umac_reset(priv);
@@ -2894,12 +2890,6 @@ static int bcmgenet_open(struct net_device *dev)
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
- ret = bcmgenet_mii_config(dev, true);
- if (ret) {
- netdev_err(dev, "unsupported PHY\n");
- goto err_disconnect_phy;
- }
-
bcmgenet_set_hw_addr(priv, dev->dev_addr);
if (priv->internal_phy) {
@@ -2915,7 +2905,7 @@ static int bcmgenet_open(struct net_device *dev)
ret = bcmgenet_init_dma(priv);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
- goto err_disconnect_phy;
+ goto err_clk_disable;
}
/* Always enable ring 16 - descriptor ring */
@@ -2938,19 +2928,25 @@ static int bcmgenet_open(struct net_device *dev)
goto err_irq0;
}
+ ret = bcmgenet_mii_probe(dev);
+ if (ret) {
+ netdev_err(dev, "failed to connect to PHY\n");
+ goto err_irq1;
+ }
+
bcmgenet_netif_start(dev);
netif_tx_start_all_queues(dev);
return 0;
+err_irq1:
+ free_irq(priv->irq1, priv);
err_irq0:
free_irq(priv->irq0, priv);
err_fini_dma:
bcmgenet_dma_teardown(priv);
bcmgenet_fini_dma(priv);
-err_disconnect_phy:
- phy_disconnect(dev->phydev);
err_clk_disable:
if (priv->internal_phy)
bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
@@ -3426,12 +3422,48 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
params->words_per_bd);
}
+struct bcmgenet_plat_data {
+ enum bcmgenet_version version;
+ u32 dma_max_burst_length;
+};
+
+static const struct bcmgenet_plat_data v1_plat_data = {
+ .version = GENET_V1,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v2_plat_data = {
+ .version = GENET_V2,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v3_plat_data = {
+ .version = GENET_V3,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v4_plat_data = {
+ .version = GENET_V4,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v5_plat_data = {
+ .version = GENET_V5,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data bcm2711_plat_data = {
+ .version = GENET_V5,
+ .dma_max_burst_length = 0x08,
+};
+
static const struct of_device_id bcmgenet_match[] = {
- { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
- { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
- { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
- { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
- { .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
+ { .compatible = "brcm,genet-v1", .data = &v1_plat_data },
+ { .compatible = "brcm,genet-v2", .data = &v2_plat_data },
+ { .compatible = "brcm,genet-v3", .data = &v3_plat_data },
+ { .compatible = "brcm,genet-v4", .data = &v4_plat_data },
+ { .compatible = "brcm,genet-v5", .data = &v5_plat_data },
+ { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@ -3441,6 +3473,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
struct device_node *dn = pdev->dev.of_node;
const struct of_device_id *of_id = NULL;
+ const struct bcmgenet_plat_data *pdata;
struct bcmgenet_priv *priv;
struct net_device *dev;
const void *macaddr;
@@ -3464,13 +3497,16 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->irq0 = platform_get_irq(pdev, 0);
+ if (priv->irq0 < 0) {
+ err = priv->irq0;
+ goto err;
+ }
priv->irq1 = platform_get_irq(pdev, 1);
- priv->wol_irq = platform_get_irq(pdev, 2);
- if (!priv->irq0 || !priv->irq1) {
- dev_err(&pdev->dev, "can't find IRQs\n");
- err = -EINVAL;
+ if (priv->irq1 < 0) {
+ err = priv->irq1;
goto err;
}
+ priv->wol_irq = platform_get_irq_optional(pdev, 2);
if (dn)
macaddr = of_get_mac_address(dn);
@@ -3519,10 +3555,14 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->dev = dev;
priv->pdev = pdev;
- if (of_id)
- priv->version = (enum bcmgenet_version)of_id->data;
- else
+ if (of_id) {
+ pdata = of_id->data;
+ priv->version = pdata->version;
+ priv->dma_max_burst_length = pdata->dma_max_burst_length;
+ } else {
priv->version = pd->genet_version;
+ priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
+ }
priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
if (IS_ERR(priv->clk)) {
@@ -3635,8 +3675,6 @@ static int bcmgenet_resume(struct device *d)
if (priv->internal_phy)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
- phy_init_hw(dev->phydev);
-
bcmgenet_umac_reset(priv);
init_umac(priv);
@@ -3645,7 +3683,10 @@ static int bcmgenet_resume(struct device *d)
if (priv->wolopts)
clk_disable_unprepare(priv->clk_wol);
+ phy_init_hw(dev->phydev);
+
/* Speed settings must be restored */
+ genphy_config_aneg(dev->phydev);
bcmgenet_mii_config(priv->dev, false);
bcmgenet_set_hw_addr(priv, dev->dev_addr);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 7fbf573d8d52..a5659197598f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -664,6 +664,7 @@ struct bcmgenet_priv {
bool crc_fwd_en;
unsigned int dma_rx_chk_bit;
+ u32 dma_max_burst_length;
u32 msg_enable;
@@ -720,8 +721,8 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
/* MDIO routines */
int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_connect(struct net_device *dev);
int bcmgenet_mii_config(struct net_device *dev, bool init);
+int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
void bcmgenet_mii_setup(struct net_device *dev);
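A sketch of the MDIO/PHY call order this header change establishes (driver-local functions; the ordering is read out of this patch rather than documented anywhere):

	bcmgenet_mii_init(dev);		/* probe: register the MDIO bus */
	...
	bcmgenet_mii_probe(dev);	/* ndo_open: attach the PHY, then
					 * call bcmgenet_mii_config()
					 */
	...
	bcmgenet_mii_exit(dev);		/* remove: unregister the bus */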
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 17bb8d60a157..6392a2530183 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -173,46 +173,6 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
bcmgenet_fixed_phy_link_update);
}
-int bcmgenet_mii_connect(struct net_device *dev)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device_node *dn = priv->pdev->dev.of_node;
- struct phy_device *phydev;
- u32 phy_flags = 0;
- int ret;
-
- /* Communicate the integrated PHY revision */
- if (priv->internal_phy)
- phy_flags = priv->gphy_rev;
-
- /* Initialize link state variables that bcmgenet_mii_setup() uses */
- priv->old_link = -1;
- priv->old_speed = -1;
- priv->old_duplex = -1;
- priv->old_pause = -1;
-
- if (dn) {
- phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
- phy_flags, priv->phy_interface);
- if (!phydev) {
- pr_err("could not attach to PHY\n");
- return -ENODEV;
- }
- } else {
- phydev = dev->phydev;
- phydev->dev_flags = phy_flags;
-
- ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
- priv->phy_interface);
- if (ret) {
- pr_err("could not attach to PHY\n");
- return -ENODEV;
- }
- }
-
- return 0;
-}
-
int bcmgenet_mii_config(struct net_device *dev, bool init)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -221,13 +181,42 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
const char *phy_name = NULL;
u32 id_mode_dis = 0;
u32 port_ctrl;
+ int bmcr = -1;
+ int ret;
u32 reg;
- priv->ext_phy = !priv->internal_phy &&
- (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
+ /* MAC clocking workaround during reset of umac state machines */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (reg & CMD_SW_RESET) {
+ /* An MII PHY must be isolated to prevent TXC contention */
+ if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret >= 0) {
+ bmcr = ret;
+ ret = phy_write(phydev, MII_BMCR,
+ bmcr | BMCR_ISOLATE);
+ }
+ if (ret) {
+ netdev_err(dev, "failed to isolate PHY\n");
+ return ret;
+ }
+ }
+ /* Switch MAC clocking to RGMII generated clock */
+ bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+ /* Ensure 5 clks with Rx disabled
+ * followed by 5 clks with Reset asserted
+ */
+ udelay(4);
+ reg &= ~(CMD_SW_RESET | CMD_LCL_LOOP_EN);
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ /* Ensure 5 more clocks before Rx is enabled */
+ udelay(2);
+ }
switch (priv->phy_interface) {
case PHY_INTERFACE_MODE_INTERNAL:
+ phy_name = "internal PHY";
+ /* fall through */
case PHY_INTERFACE_MODE_MOCA:
/* Irrespective of the actually configured PHY speed (100 or
* 1000) GENETv4 only has an internal GPHY so we will just end
@@ -239,11 +228,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
else
port_ctrl = PORT_MODE_INT_EPHY;
- bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
-
- if (priv->internal_phy) {
- phy_name = "internal PHY";
- } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+ if (!phy_name) {
phy_name = "MoCA";
bcmgenet_moca_phy_setup(priv);
}
@@ -252,8 +237,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
case PHY_INTERFACE_MODE_MII:
phy_name = "external MII";
phy_set_max_speed(phydev, SPEED_100);
- bcmgenet_sys_writel(priv,
- PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
+ port_ctrl = PORT_MODE_EXT_EPHY;
break;
case PHY_INTERFACE_MODE_REVMII:
@@ -268,31 +252,43 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
port_ctrl = PORT_MODE_EXT_RVMII_50;
else
port_ctrl = PORT_MODE_EXT_RVMII_25;
- bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
break;
case PHY_INTERFACE_MODE_RGMII:
/* RGMII_NO_ID: TXC transitions at the same time as TXD
* (requires PCB or receiver-side delay)
- * RGMII: Add 2ns delay on TXC (90 degree shift)
*
* ID is implicitly disabled for 100Mbps (RG)MII operation.
*/
+ phy_name = "external RGMII (no delay)";
id_mode_dis = BIT(16);
- /* fall through */
+ port_ctrl = PORT_MODE_EXT_GPHY;
+ break;
+
case PHY_INTERFACE_MODE_RGMII_TXID:
- if (id_mode_dis)
- phy_name = "external RGMII (no delay)";
- else
- phy_name = "external RGMII (TX delay)";
- bcmgenet_sys_writel(priv,
- PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+ /* RGMII_TXID: Add 2ns delay on TXC (90 degree shift) */
+ phy_name = "external RGMII (TX delay)";
+ port_ctrl = PORT_MODE_EXT_GPHY;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ phy_name = "external RGMII (RX delay)";
+ port_ctrl = PORT_MODE_EXT_GPHY;
break;
default:
dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
return -EINVAL;
}
+ bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+
+ /* Restore the MII PHY after isolation */
+ if (bmcr >= 0)
+ phy_write(phydev, MII_BMCR, bmcr);
+
+ priv->ext_phy = !priv->internal_phy &&
+ (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
+
/* This is an external PHY (xMII), so we need to enable the RGMII
* block for the interface to work
*/
@@ -306,21 +302,71 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
- if (init) {
- linkmode_copy(phydev->advertising, phydev->supported);
+ if (init)
+ dev_info(kdev, "configuring instance for %s\n", phy_name);
- /* The internal PHY has its link interrupts routed to the
- * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
- * that prevents the signaling of link UP interrupts when
- * the link operates at 10Mbps, so fallback to polling for
- * those versions of GENET.
- */
- if (priv->internal_phy && !GENET_IS_V5(priv))
- phydev->irq = PHY_IGNORE_INTERRUPT;
+ return 0;
+}
- dev_info(kdev, "configuring instance for %s\n", phy_name);
+int bcmgenet_mii_probe(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device_node *dn = priv->pdev->dev.of_node;
+ struct phy_device *phydev;
+ u32 phy_flags = 0;
+ int ret;
+
+ /* Communicate the integrated PHY revision */
+ if (priv->internal_phy)
+ phy_flags = priv->gphy_rev;
+
+ /* Initialize link state variables that bcmgenet_mii_setup() uses */
+ priv->old_link = -1;
+ priv->old_speed = -1;
+ priv->old_duplex = -1;
+ priv->old_pause = -1;
+
+ if (dn) {
+ phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
+ phy_flags, priv->phy_interface);
+ if (!phydev) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
+ } else {
+ phydev = dev->phydev;
+ phydev->dev_flags = phy_flags;
+
+ ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
+ priv->phy_interface);
+ if (ret) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
}
+ /* Configure the port multiplexer based on the probed PHY device,
+ * since reading the 'max-speed' property determines the maximum
+ * supported PHY speed, which bcmgenet_mii_config() needs in order
+ * to configure things appropriately.
+ */
+ ret = bcmgenet_mii_config(dev, true);
+ if (ret) {
+ phy_disconnect(dev->phydev);
+ return ret;
+ }
+
+ linkmode_copy(phydev->advertising, phydev->supported);
+
+ /* The internal PHY has its link interrupts routed to the
+ * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
+ * that prevents the signaling of link UP interrupts when
+ * the link operates at 10Mbps, so fall back to polling for
+ * those versions of GENET.
+ */
+ if (priv->internal_phy && !GENET_IS_V5(priv))
+ dev->phydev->irq = PHY_IGNORE_INTERRUPT;
+
return 0;
}
@@ -436,7 +482,7 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
struct device_node *dn = priv->pdev->dev.of_node;
struct device *kdev = &priv->pdev->dev;
struct phy_device *phydev;
- int phy_mode;
+ phy_interface_t phy_mode;
int ret;
/* Fetch the PHY phandle */
@@ -454,10 +500,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
}
/* Get the link mode */
- phy_mode = of_get_phy_mode(dn);
- if (phy_mode < 0) {
+ ret = of_get_phy_mode(dn, &phy_mode);
+ if (ret) {
dev_err(kdev, "invalid PHY mode property\n");
- return phy_mode;
+ return ret;
}
priv->phy_interface = phy_mode;
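A minimal sketch of the new of_get_phy_mode() calling convention, which macb_probe() below adopts as well: the mode is returned through an out-parameter and the return value is 0 or a negative errno, so a missing property can no longer be confused with a valid enum value (the MII default mirrors what these drivers choose; it is not mandated by the API):

	#include <linux/of_net.h>

	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(np, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_MII;	/* driver-chosen default */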
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 77f3511b97de..ca3aa1250dd1 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6280,6 +6280,10 @@ static int tg3_ptp_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
if (rq->perout.index != 0)
return -EINVAL;
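A sketch of the flag-validation rule the tg3 hunk above applies: the PTP core can grow new rq->perout.flags bits over time, so a driver that implements none of them must reject any set flag rather than silently ignore semantics it does not understand:

	case PTP_CLK_REQ_PEROUT:
		if (rq->perout.flags)	/* no perout flags supported */
			return -EOPNOTSUPP;
		...
		break;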
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index f4b3bd85dfe3..53b50c24d9c9 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,7 +22,7 @@ if NET_VENDOR_CADENCE
config MACB
tristate "Cadence MACB/GEM support"
depends on HAS_DMA && COMMON_CLK
- select PHYLIB
+ select PHYLINK
---help---
The Cadence MACB ethernet interface is found on many Atmel AT32 and
AT91 parts. This driver also supports the Cadence GEM (Gigabit
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 03983bd46eef..19fe4f4867c7 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -7,7 +7,7 @@
#ifndef _MACB_H
#define _MACB_H
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
@@ -1185,15 +1185,14 @@ struct macb {
struct macb_or_gem_ops macbgem_ops;
struct mii_bus *mii_bus;
- struct device_node *phy_node;
- int link;
- int speed;
- int duplex;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
u32 caps;
unsigned int dma_burst_length;
phy_interface_t phy_interface;
+ int speed;
/* AT91RM9200 transmit */
struct sk_buff *skb; /* holds skb until xmit interrupt completes */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 1e1b774e1953..8fc2e21f0bb1 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -25,7 +25,7 @@
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -388,6 +388,27 @@ mdio_pm_exit:
return status;
}
+static void macb_init_buffers(struct macb *bp)
+{
+ struct macb_queue *queue;
+ unsigned int q;
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, RBQPH,
+ upper_32_bits(queue->rx_ring_dma));
+#endif
+ queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, TBQPH,
+ upper_32_bits(queue->tx_ring_dma));
+#endif
+ }
+}
+
/**
* macb_set_tx_clk() - Set a clock to a new frequency
* @clk Pointer to the clock to change
@@ -432,114 +453,178 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
netdev_err(dev, "adjusting tx_clk failed.\n");
}
-static void macb_handle_link_change(struct net_device *dev)
+static void macb_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
+ struct net_device *ndev = to_net_dev(config->dev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct macb *bp = netdev_priv(ndev);
+
+ /* We only support MII, RMII, GMII, RGMII & SGMII. */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_RMII &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_rgmii(state->interface)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ if (!macb_is_gem(bp) &&
+ (state->interface == PHY_INTERFACE_MODE_GMII ||
+ phy_interface_mode_is_rgmii(state->interface))) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Asym_Pause);
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
+ (state->interface == PHY_INTERFACE_MODE_NA ||
+ state->interface == PHY_INTERFACE_MODE_GMII ||
+ state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_rgmii(state->interface))) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
+ phylink_set(mask, 1000baseT_Half);
+ }
+
+ bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int macb_mac_link_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ return -EOPNOTSUPP;
+}
+
+static void macb_mac_an_restart(struct phylink_config *config)
+{
+ /* Not supported */
+}
+
+static void macb_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
unsigned long flags;
- int status_change = 0;
+ u32 old_ctrl, ctrl;
spin_lock_irqsave(&bp->lock, flags);
- if (phydev->link) {
- if ((bp->speed != phydev->speed) ||
- (bp->duplex != phydev->duplex)) {
- u32 reg;
+ old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
- reg = macb_readl(bp, NCFGR);
- reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
- if (macb_is_gem(bp))
- reg &= ~GEM_BIT(GBE);
+ /* Clear all the bits we might set later */
+ ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE) |
+ GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
- if (phydev->duplex)
- reg |= MACB_BIT(FD);
- if (phydev->speed == SPEED_100)
- reg |= MACB_BIT(SPD);
- if (phydev->speed == SPEED_1000 &&
- bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
- reg |= GEM_BIT(GBE);
+ if (state->speed == SPEED_1000)
+ ctrl |= GEM_BIT(GBE);
+ else if (state->speed == SPEED_100)
+ ctrl |= MACB_BIT(SPD);
- macb_or_gem_writel(bp, NCFGR, reg);
+ if (state->duplex)
+ ctrl |= MACB_BIT(FD);
- bp->speed = phydev->speed;
- bp->duplex = phydev->duplex;
- status_change = 1;
- }
- }
+ /* We do not support MLO_PAUSE_RX yet */
+ if (state->pause & MLO_PAUSE_TX)
+ ctrl |= MACB_BIT(PAE);
- if (phydev->link != bp->link) {
- if (!phydev->link) {
- bp->speed = 0;
- bp->duplex = -1;
- }
- bp->link = phydev->link;
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
- status_change = 1;
- }
+ /* Apply the new configuration, if any */
+ if (old_ctrl ^ ctrl)
+ macb_or_gem_writel(bp, NCFGR, ctrl);
+
+ bp->speed = state->speed;
spin_unlock_irqrestore(&bp->lock, flags);
+}
- if (status_change) {
- if (phydev->link) {
- /* Update the TX clock rate if and only if the link is
- * up and there has been a link change.
- */
- macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
+ struct macb_queue *queue;
+ unsigned int q;
+ u32 ctrl;
- netif_carrier_on(dev);
- netdev_info(dev, "link up (%d/%s)\n",
- phydev->speed,
- phydev->duplex == DUPLEX_FULL ?
- "Full" : "Half");
- } else {
- netif_carrier_off(dev);
- netdev_info(dev, "link down\n");
- }
- }
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ queue_writel(queue, IDR,
+ bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
+
+ /* Disable Rx and Tx */
+ ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
+ macb_writel(bp, NCR, ctrl);
+
+ netif_tx_stop_all_queues(ndev);
}
-/* based on au1000_eth. c*/
-static int macb_mii_probe(struct net_device *dev)
+static void macb_mac_link_up(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phy)
{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev;
- struct device_node *np;
- int ret, i;
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
+ struct macb_queue *queue;
+ unsigned int q;
- np = bp->pdev->dev.of_node;
- ret = 0;
+ macb_set_tx_clk(bp->tx_clk, bp->speed, ndev);
- if (np) {
- if (of_phy_is_fixed_link(np)) {
- bp->phy_node = of_node_get(np);
- } else {
- bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
- /* fallback to standard phy registration if no
- * phy-handle was found nor any phy found during
- * dt phy registration
- */
- if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- phydev = mdiobus_scan(bp->mii_bus, i);
- if (IS_ERR(phydev) &&
- PTR_ERR(phydev) != -ENODEV) {
- ret = PTR_ERR(phydev);
- break;
- }
- }
+ /* Initialize rings & buffers, since clearing MACB_BIT(TE) in the
+ * link-down path reset the pipeline and control registers.
+ */
+ bp->macbgem_ops.mog_init_rings(bp);
+ macb_init_buffers(bp);
- if (ret)
- return -ENODEV;
- }
- }
- }
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ queue_writel(queue, IER,
+ bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
+
+ /* Enable Rx and Tx */
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
+
+ netif_tx_wake_all_queues(ndev);
+}
+
+static const struct phylink_mac_ops macb_phylink_ops = {
+ .validate = macb_validate,
+ .mac_link_state = macb_mac_link_state,
+ .mac_an_restart = macb_mac_an_restart,
+ .mac_config = macb_mac_config,
+ .mac_link_down = macb_mac_link_down,
+ .mac_link_up = macb_mac_link_up,
+};
- if (bp->phy_node) {
- phydev = of_phy_connect(dev, bp->phy_node,
- &macb_handle_link_change, 0,
- bp->phy_interface);
- if (!phydev)
- return -ENODEV;
+static int macb_phylink_connect(struct macb *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct phy_device *phydev;
+ int ret;
+
+ if (bp->pdev->dev.of_node &&
+ of_parse_phandle(bp->pdev->dev.of_node, "phy-handle", 0)) {
+ ret = phylink_of_phy_connect(bp->phylink, bp->pdev->dev.of_node,
+ 0);
+ if (ret) {
+ netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+ return ret;
+ }
} else {
phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
@@ -548,27 +633,33 @@ static int macb_mii_probe(struct net_device *dev)
}
/* attach the mac to the phy */
- ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
- bp->phy_interface);
+ ret = phylink_connect_phy(bp->phylink, phydev);
if (ret) {
- netdev_err(dev, "Could not attach to PHY\n");
+ netdev_err(dev, "Could not attach to PHY (%d)\n", ret);
return ret;
}
}
- /* mask with MAC supported features */
- if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
- phy_set_max_speed(phydev, SPEED_1000);
- else
- phy_set_max_speed(phydev, SPEED_100);
+ phylink_start(bp->phylink);
- if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ return 0;
+}
- bp->link = 0;
- bp->speed = 0;
- bp->duplex = -1;
+/* based on au1000_eth.c */
+static int macb_mii_probe(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ bp->phylink_config.dev = &dev->dev;
+ bp->phylink_config.type = PHYLINK_NETDEV;
+
+ bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
+ bp->phy_interface, &macb_phylink_ops);
+ if (IS_ERR(bp->phylink)) {
+ netdev_err(dev, "Could not create a phylink instance (%ld)\n",
+ PTR_ERR(bp->phylink));
+ return PTR_ERR(bp->phylink);
+ }
return 0;
}
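A condensed sketch of the phylink lifecycle macb follows after this conversion (all calls are real phylink API from <linux/phylink.h>; the pairing with probe/open/stop/remove is inferred from the hunks in this patch):

	pl = phylink_create(&cfg, fwnode, mode, &macb_phylink_ops); /* probe */
	...
	phylink_of_phy_connect(pl, np, 0);	/* ndo_open */
	phylink_start(pl);
	...
	phylink_stop(pl);			/* ndo_stop */
	phylink_disconnect_phy(pl);
	...
	phylink_destroy(pl);			/* remove */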
@@ -598,20 +689,10 @@ static int macb_mii_init(struct macb *bp)
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
np = bp->pdev->dev.of_node;
- if (np && of_phy_is_fixed_link(np)) {
- if (of_phy_register_fixed_link(np) < 0) {
- dev_err(&bp->pdev->dev,
- "broken fixed-link specification %pOF\n", np);
- goto err_out_free_mdiobus;
- }
-
- err = mdiobus_register(bp->mii_bus);
- } else {
- err = of_mdiobus_register(bp->mii_bus, np);
- }
+ err = of_mdiobus_register(bp->mii_bus, np);
if (err)
- goto err_out_free_fixed_link;
+ goto err_out_free_mdiobus;
err = macb_mii_probe(bp->dev);
if (err)
@@ -621,11 +702,7 @@ static int macb_mii_init(struct macb *bp)
err_out_unregister_bus:
mdiobus_unregister(bp->mii_bus);
-err_out_free_fixed_link:
- if (np && of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
err_out_free_mdiobus:
- of_node_put(bp->phy_node);
mdiobus_free(bp->mii_bus);
err_out:
return err;
@@ -1314,26 +1391,14 @@ static void macb_hresp_error_task(unsigned long data)
bp->macbgem_ops.mog_init_rings(bp);
/* Initialize TX and RX buffers */
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, RBQPH,
- upper_32_bits(queue->rx_ring_dma));
-#endif
- queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH,
- upper_32_bits(queue->tx_ring_dma));
-#endif
+ macb_init_buffers(bp);
- /* Enable interrupts */
+ /* Enable interrupts */
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
queue_writel(queue, IER,
bp->rx_intr_mask |
MACB_TX_INT_FLAGS |
MACB_BIT(HRESP));
- }
ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
macb_writel(bp, NCR, ctrl);
@@ -2221,19 +2286,13 @@ static void macb_configure_dma(struct macb *bp)
static void macb_init_hw(struct macb *bp)
{
- struct macb_queue *queue;
- unsigned int q;
-
u32 config;
macb_reset_hw(bp);
macb_set_hwaddr(bp);
config = macb_mdc_clk_div(bp);
- if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
- config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
- config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
if (bp->caps & MACB_CAPS_JUMBO)
config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
@@ -2249,36 +2308,11 @@ static void macb_init_hw(struct macb *bp)
macb_writel(bp, NCFGR, config);
if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
gem_writel(bp, JML, bp->jumbo_max_len);
- bp->speed = SPEED_10;
- bp->duplex = DUPLEX_HALF;
bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
if (bp->caps & MACB_CAPS_JUMBO)
bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
macb_configure_dma(bp);
-
- /* Initialize TX and RX buffers */
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
-#endif
- queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
-#endif
-
- /* Enable interrupts */
- queue_writel(queue, IER,
- bp->rx_intr_mask |
- MACB_TX_INT_FLAGS |
- MACB_BIT(HRESP));
- }
-
- /* Enable TX and RX */
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
}
/* The hash address register is 64 bits long and takes up two
@@ -2402,8 +2436,8 @@ static void macb_set_rx_mode(struct net_device *dev)
static int macb_open(struct net_device *dev)
{
- struct macb *bp = netdev_priv(dev);
size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
+ struct macb *bp = netdev_priv(dev);
struct macb_queue *queue;
unsigned int q;
int err;
@@ -2414,15 +2448,6 @@ static int macb_open(struct net_device *dev)
if (err < 0)
goto pm_exit;
- /* carrier starts down */
- netif_carrier_off(dev);
-
- /* if the phy is not yet register, retry later*/
- if (!dev->phydev) {
- err = -EAGAIN;
- goto pm_exit;
- }
-
/* RX buffers initialization */
macb_init_rx_buffer_size(bp, bufsz);
@@ -2436,11 +2461,11 @@ static int macb_open(struct net_device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
napi_enable(&queue->napi);
- bp->macbgem_ops.mog_init_rings(bp);
macb_init_hw(bp);
- /* schedule a link state check */
- phy_start(dev->phydev);
+ err = macb_phylink_connect(bp);
+ if (err)
+ goto pm_exit;
netif_tx_start_all_queues(dev);
@@ -2467,8 +2492,8 @@ static int macb_close(struct net_device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
napi_disable(&queue->napi);
- if (dev->phydev)
- phy_stop(dev->phydev);
+ phylink_stop(bp->phylink);
+ phylink_disconnect_phy(bp->phylink);
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -2702,17 +2727,18 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
wol->supported = 0;
wol->wolopts = 0;
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
- wol->supported = WAKE_MAGIC;
-
- if (bp->wol & MACB_WOL_ENABLED)
- wol->wolopts |= WAKE_MAGIC;
- }
+ if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET)
+ phylink_ethtool_get_wol(bp->phylink, wol);
}
static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct macb *bp = netdev_priv(netdev);
+ int ret;
+
+ ret = phylink_ethtool_set_wol(bp->phylink, wol);
+ if (!ret)
+ return 0;
if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
(wol->wolopts & ~WAKE_MAGIC))
@@ -2728,6 +2754,22 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
+static int macb_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *kset)
+{
+ struct macb *bp = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_get(bp->phylink, kset);
+}
+
+static int macb_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *kset)
+{
+ struct macb *bp = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_set(bp->phylink, kset);
+}
+
static void macb_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -3164,8 +3206,8 @@ static const struct ethtool_ops macb_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = macb_get_link_ksettings,
+ .set_link_ksettings = macb_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
.set_ringparam = macb_set_ringparam,
};
@@ -3178,8 +3220,8 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = macb_get_link_ksettings,
+ .set_link_ksettings = macb_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
.set_ringparam = macb_set_ringparam,
.get_rxnfc = gem_get_rxnfc,
@@ -3188,26 +3230,21 @@ static const struct ethtool_ops gem_ethtool_ops = {
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct phy_device *phydev = dev->phydev;
struct macb *bp = netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
- if (!phydev)
- return -ENODEV;
-
- if (!bp->ptp_info)
- return phy_mii_ioctl(phydev, rq, cmd);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return bp->ptp_info->set_hwtst(dev, rq, cmd);
- case SIOCGHWTSTAMP:
- return bp->ptp_info->get_hwtst(dev, rq);
- default:
- return phy_mii_ioctl(phydev, rq, cmd);
+ if (bp->ptp_info) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return bp->ptp_info->set_hwtst(dev, rq, cmd);
+ case SIOCGHWTSTAMP:
+ return bp->ptp_info->get_hwtst(dev, rq);
+ }
}
+
+ return phylink_mii_ioctl(bp->phylink, rq, cmd);
}
static inline void macb_set_txcsum_feature(struct macb *bp,
@@ -3330,7 +3367,8 @@ static void macb_configure_caps(struct macb *bp,
#ifdef CONFIG_MACB_USE_HWSTAMP
if (gem_has_ptp(bp)) {
if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
- pr_err("GEM doesn't support hardware ptp.\n");
+ dev_err(&bp->pdev->dev,
+ "GEM doesn't support hardware ptp.\n");
else {
bp->hw_dma_cap |= HW_DMA_CAP_PTP;
bp->ptp_info = &gem_ptp_info;
@@ -3707,8 +3745,9 @@ static int at91ether_open(struct net_device *dev)
MACB_BIT(ISR_ROVR) |
MACB_BIT(HRESP));
- /* schedule a link state check */
- phy_start(dev->phydev);
+ ret = macb_phylink_connect(lp);
+ if (ret)
+ return ret;
netif_start_queue(dev);
@@ -3737,6 +3776,9 @@ static int at91ether_close(struct net_device *dev)
netif_stop_queue(dev);
+ phylink_stop(lp->phylink);
+ phylink_disconnect_phy(lp->phylink);
+
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp),
@@ -4181,7 +4223,7 @@ static int macb_probe(struct platform_device *pdev)
struct clk *tsu_clk = NULL;
unsigned int queue_mask, num_queues;
bool native_io;
- struct phy_device *phydev;
+ phy_interface_t interface;
struct net_device *dev;
struct resource *regs;
void __iomem *mem;
@@ -4308,12 +4350,14 @@ static int macb_probe(struct platform_device *pdev)
macb_get_hwaddr(bp);
}
- err = of_get_phy_mode(np);
- if (err < 0)
+ err = of_get_phy_mode(np, &interface);
+ if (err)
/* not found in DT, MII by default */
bp->phy_interface = PHY_INTERFACE_MODE_MII;
else
- bp->phy_interface = err;
+ bp->phy_interface = interface;
+
+ bp->speed = SPEED_UNKNOWN;
/* IP specific init */
err = init(pdev);
@@ -4324,8 +4368,6 @@ static int macb_probe(struct platform_device *pdev)
if (err)
goto err_out_free_netdev;
- phydev = dev->phydev;
-
netif_carrier_off(dev);
err = register_netdev(dev);
@@ -4337,8 +4379,6 @@ static int macb_probe(struct platform_device *pdev)
tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
(unsigned long)bp);
- phy_attached_info(phydev);
-
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
dev->base_addr, dev->irq, dev->dev_addr);
@@ -4349,11 +4389,7 @@ static int macb_probe(struct platform_device *pdev)
return 0;
err_out_unregister_mdio:
- phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
- of_node_put(bp->phy_node);
- if (np && of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
mdiobus_free(bp->mii_bus);
err_out_free_netdev:
@@ -4377,18 +4413,12 @@ static int macb_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct macb *bp;
- struct device_node *np = pdev->dev.of_node;
dev = platform_get_drvdata(pdev);
if (dev) {
bp = netdev_priv(dev);
- if (dev->phydev)
- phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
- if (np && of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
- dev->phydev = NULL;
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
@@ -4403,7 +4433,7 @@ static int macb_remove(struct platform_device *pdev)
clk_disable_unprepare(bp->tsu_clk);
pm_runtime_set_suspended(&pdev->dev);
}
- of_node_put(bp->phy_node);
+ phylink_destroy(bp->phylink);
free_netdev(dev);
}
@@ -4421,7 +4451,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
if (!netif_running(netdev))
return 0;
-
if (bp->wol & MACB_WOL_ENABLED) {
macb_writel(bp, IER, MACB_BIT(WOL));
macb_writel(bp, WOL, MACB_BIT(MAG));
@@ -4432,8 +4461,9 @@ static int __maybe_unused macb_suspend(struct device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue)
napi_disable(&queue->napi);
- phy_stop(netdev->phydev);
- phy_suspend(netdev->phydev);
+ rtnl_lock();
+ phylink_stop(bp->phylink);
+ rtnl_unlock();
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
spin_unlock_irqrestore(&bp->lock, flags);
@@ -4481,12 +4511,11 @@ static int __maybe_unused macb_resume(struct device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue)
napi_enable(&queue->napi);
- phy_resume(netdev->phydev);
- phy_init_hw(netdev->phydev);
- phy_start(netdev->phydev);
+ rtnl_lock();
+ phylink_start(bp->phylink);
+ rtnl_unlock();
}
- bp->macbgem_ops.mog_init_rings(bp);
macb_init_hw(bp);
macb_set_rx_mode(netdev);
macb_restore_features(bp);
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 0e5de88fd6e8..cdd7e5da4a74 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1499,7 +1499,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
- netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM;
+ netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
mac = of_get_mac_address(pdev->dev.of_node);
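The arithmetic behind the octeon_mgmt change above, spelled out: 16383 is the device's frame-size limit, and since VLAN_HLEN is 4, the advertised MTU now also leaves room for one 802.1Q tag so a full-MTU tagged frame still fits:

	netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;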
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 20390f6afbb4..a4b4d475abf8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -8,7 +8,8 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o cxgb4_mps.o \
- cudbg_common.o cudbg_lib.o cudbg_zlib.o
+ cudbg_common.o cudbg_lib.o cudbg_zlib.o cxgb4_tc_mqprio.o \
+ cxgb4_tc_matchall.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 69746696a929..f5be3ee1bdb4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -325,6 +325,9 @@ enum cudbg_qdesc_qtype {
CUDBG_QTYPE_CRYPTO_FLQ,
CUDBG_QTYPE_TLS_RXQ,
CUDBG_QTYPE_TLS_FLQ,
+ CUDBG_QTYPE_ETHOFLD_TXQ,
+ CUDBG_QTYPE_ETHOFLD_RXQ,
+ CUDBG_QTYPE_ETHOFLD_FLQ,
CUDBG_QTYPE_MAX,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index c2e92786608b..19c11568113a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -4,6 +4,7 @@
*/
#include <linux/sort.h>
+#include <linux/string.h>
#include "t4_regs.h"
#include "cxgb4.h"
@@ -776,24 +777,18 @@ static int cudbg_get_mem_region(struct adapter *padap,
struct cudbg_mem_desc *mem_desc)
{
u8 mc, found = 0;
- u32 i, idx = 0;
- int rc;
+ u32 idx = 0;
+ int rc, i;
rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
if (rc)
return rc;
- for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) {
- if (!strcmp(cudbg_region[i], region_name)) {
- found = 1;
- idx = i;
- break;
- }
- }
- if (!found)
+ i = match_string(cudbg_region, ARRAY_SIZE(cudbg_region), region_name);
+ if (i < 0)
return -EINVAL;
- found = 0;
+ idx = i;
for (i = 0; i < meminfo->mem_c; i++) {
if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
continue; /* Skip holes */
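A small sketch of the match_string() helper adopted above (declared in <linux/string.h>; the region names here are illustrative, not the real cudbg_region table): it returns the index of the first exact match or -EINVAL, replacing the open-coded found/idx bookkeeping:

	static const char * const regions[] = { "edc0", "edc1", "mc0" };
	int i;

	i = match_string(regions, ARRAY_SIZE(regions), "mc0");
	if (i < 0)
		return -EINVAL;	/* unknown region name */
	/* i == 2 here */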
@@ -2930,6 +2925,10 @@ void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
MAX_RXQ_DESC_SIZE;
+ /* ETHOFLD TXQ, RXQ, and FLQ */
+ tot_entries += MAX_OFLD_QSETS * 3;
+ tot_size += MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
+
tot_size += sizeof(struct cudbg_ver_hdr) +
sizeof(struct cudbg_qdesc_info) +
sizeof(struct cudbg_qdesc_entry) * tot_entries;
@@ -3087,6 +3086,23 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
}
}
+ /* ETHOFLD TXQ */
+ if (s->eohw_txq)
+ for (i = 0; i < s->eoqsets; i++)
+ QDESC_GET_TXQ(&s->eohw_txq[i].q,
+ CUDBG_QTYPE_ETHOFLD_TXQ, out);
+
+ /* ETHOFLD RXQ and FLQ */
+ if (s->eohw_rxq) {
+ for (i = 0; i < s->eoqsets; i++)
+ QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
+ CUDBG_QTYPE_ETHOFLD_RXQ, out);
+
+ for (i = 0; i < s->eoqsets; i++)
+ QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
+ CUDBG_QTYPE_ETHOFLD_FLQ, out);
+ }
+
out_unlock:
mutex_unlock(&uld_mutex);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1fbb640e896a..3121ed83d8e2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -392,6 +392,7 @@ struct adapter_params {
struct arch_specific_params arch; /* chip specific params */
unsigned char offload;
unsigned char crypto; /* HW capability for crypto */
+ unsigned char ethofld; /* QoS support */
unsigned char bypass;
unsigned char hash_filter;
@@ -602,6 +603,8 @@ struct port_info {
u8 vivld;
u8 smt_idx;
u8 rx_cchan;
+
+ bool tc_block_shared;
};
struct dentry;
@@ -711,6 +714,7 @@ struct sge_eth_rxq { /* SW Ethernet Rx queue */
struct sge_rspq rspq;
struct sge_fl fl;
struct sge_eth_stats stats;
+ struct msix_info *msix;
} ____cacheline_aligned_in_smp;
struct sge_ofld_stats { /* offload queue statistics */
@@ -724,6 +728,7 @@ struct sge_ofld_rxq { /* SW offload Rx queue */
struct sge_rspq rspq;
struct sge_fl fl;
struct sge_ofld_stats stats;
+ struct msix_info *msix;
} ____cacheline_aligned_in_smp;
struct tx_desc {
@@ -788,7 +793,6 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */
struct sge_uld_rxq_info {
char name[IFNAMSIZ]; /* name of ULD driver */
struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */
- u16 *msix_tbl; /* msix_tbl for uld */
u16 *rspq_id; /* response queue id's of rxq */
u16 nrxq; /* # of ingress uld queues */
u16 nciq; /* # of completion queues */
@@ -801,6 +805,55 @@ struct sge_uld_txq_info {
u16 ntxq; /* # of egress uld queues */
};
+enum sge_eosw_state {
+ CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */
+ CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */
+ CXGB4_EO_STATE_FLOWC_OPEN_REPLY, /* Waiting for FLOWC open reply */
+ CXGB4_EO_STATE_ACTIVE, /* Ready to accept traffic */
+ CXGB4_EO_STATE_FLOWC_CLOSE_SEND, /* Send FLOWC close request */
+ CXGB4_EO_STATE_FLOWC_CLOSE_REPLY, /* Waiting for FLOWC close reply */
+};
+
+struct sge_eosw_desc {
+ struct sk_buff *skb; /* SKB to free after getting completion */
+ dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */
+};
+
+struct sge_eosw_txq {
+ spinlock_t lock; /* Per queue lock to synchronize completions */
+ enum sge_eosw_state state; /* Current ETHOFLD State */
+ struct sge_eosw_desc *desc; /* Descriptor ring to hold packets */
+ u32 ndesc; /* Number of descriptors */
+ u32 pidx; /* Current Producer Index */
+ u32 last_pidx; /* Last successfully transmitted Producer Index */
+ u32 cidx; /* Current Consumer Index */
+ u32 last_cidx; /* Last successfully reclaimed Consumer Index */
+ u32 flowc_idx; /* Descriptor containing a FLOWC request */
+ u32 inuse; /* Number of packets held in ring */
+
+ u32 cred; /* Current available credits */
+ u32 ncompl; /* # of completions posted */
+ u32 last_compl; /* # of credits consumed since last completion req */
+
+ u32 eotid; /* Index into EOTID table in software */
+ u32 hwtid; /* Hardware EOTID index */
+
+ u32 hwqid; /* Underlying hardware queue index */
+ struct net_device *netdev; /* Pointer to netdevice */
+ struct tasklet_struct qresume_tsk; /* Restarts the queue */
+ struct completion completion; /* completion for FLOWC rendezvous */
+};
+
+struct sge_eohw_txq {
+ spinlock_t lock; /* Per queue lock */
+ struct sge_txq q; /* HW Txq */
+ struct adapter *adap; /* Backpointer to adapter */
+ unsigned long tso; /* # of TSO requests */
+ unsigned long tx_cso; /* # of Tx checksum offloads */
+ unsigned long vlan_ins; /* # of Tx VLAN insertions */
+ unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
+};
+
struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
struct sge_eth_txq ptptxq;
@@ -814,11 +867,16 @@ struct sge {
struct sge_rspq intrq ____cacheline_aligned_in_smp;
spinlock_t intrq_lock;
+ struct sge_eohw_txq *eohw_txq;
+ struct sge_ofld_rxq *eohw_rxq;
+
u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
u16 ofldqsets; /* # of active ofld queue sets */
u16 nqs_per_uld; /* # of Rx queues per ULD */
+ u16 eoqsets; /* # of ETHOFLD queues */
+
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
u16 dbqtimer_tick;
@@ -841,6 +899,9 @@ struct sge {
unsigned long *blocked_fl;
struct timer_list rx_timer; /* refills starving FLs */
struct timer_list tx_timer; /* checks Tx queues */
+
+ int fwevtq_msix_idx; /* Index to firmware event queue MSI-X info */
+ int nd_msix_idx; /* Index to non-data interrupts MSI-X info */
};
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
@@ -870,13 +931,13 @@ struct hash_mac_addr {
unsigned int iface_mac;
};
-struct uld_msix_bmap {
+struct msix_bmap {
unsigned long *msix_bmap;
unsigned int mapsize;
spinlock_t lock; /* lock for acquiring bitmap */
};
-struct uld_msix_info {
+struct msix_info {
unsigned short vec;
char desc[IFNAMSIZ + 10];
unsigned int idx;
@@ -945,14 +1006,9 @@ struct adapter {
struct cxgb4_virt_res vres;
unsigned int swintr;
- struct msix_info {
- unsigned short vec;
- char desc[IFNAMSIZ + 10];
- cpumask_var_t aff_mask;
- } msix_info[MAX_INGQ + 1];
- struct uld_msix_info *msix_info_ulds; /* msix info for uld's */
- struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */
- int msi_idx;
+ /* MSI-X Info for NIC and OFLD queues */
+ struct msix_info *msix_info;
+ struct msix_bmap msix_bmap;
struct doorbell_stats db_stats;
struct sge sge;
@@ -1044,6 +1100,12 @@ struct adapter {
#if IS_ENABLED(CONFIG_THERMAL)
struct ch_thermal ch_thermal;
#endif
+
+ /* TC MQPRIO offload */
+ struct cxgb4_tc_mqprio *tc_mqprio;
+
+ /* TC MATCHALL classifier offload */
+ struct cxgb4_tc_matchall *tc_matchall;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1073,10 +1135,12 @@ enum {
enum {
SCHED_CLASS_LEVEL_CL_RL = 0, /* class rate limiter */
+ SCHED_CLASS_LEVEL_CH_RL = 2, /* channel rate limiter */
};
enum {
SCHED_CLASS_MODE_CLASS = 0, /* per-class scheduling */
+ SCHED_CLASS_MODE_FLOW, /* per-flow scheduling */
};
enum {
@@ -1100,6 +1164,14 @@ struct ch_sched_queue {
s8 class; /* class index */
};
+/* Support for "sched_flowc" command to allow one or more FLOWC
+ * to be bound to a TX Scheduling Class.
+ */
+struct ch_sched_flowc {
+ s32 tid; /* TID to bind */
+ s8 class; /* class index */
+};
+
/* Defined bit width of user definable filter tuples
*/
#define ETHTYPE_BITWIDTH 16
@@ -1214,8 +1286,11 @@ struct ch_filter_specification {
u16 nat_lport; /* local port to use after NAT'ing */
u16 nat_fport; /* foreign port to use after NAT'ing */
+ u32 tc_prio; /* TC's filter priority index */
+ u64 tc_cookie; /* Unique cookie identifying TC rules */
+
/* reservation for future additions */
- u8 rsvd[24];
+ u8 rsvd[12];
/* Filter rule value/mask pairs.
*/
@@ -1293,6 +1368,11 @@ static inline int is_uld(const struct adapter *adap)
return (adap->params.offload || adap->params.crypto);
}
+static inline int is_ethofld(const struct adapter *adap)
+{
+ return adap->params.ethofld;
+}
+
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
@@ -1426,6 +1506,9 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int uld_type);
+int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
+ struct net_device *dev, u32 iqid);
+void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
@@ -1890,6 +1973,12 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap);
+void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *txq,
+ u32 ndesc);
+int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc);
+void cxgb4_ethofld_restart(unsigned long data);
+int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *si);
void free_txq(struct adapter *adap, struct sge_txq *q);
void cxgb4_reclaim_completed_tx(struct adapter *adap,
struct sge_txq *q, bool unmap);
@@ -1948,5 +2037,10 @@ int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
int *tcam_idx, const u8 *addr,
bool persistent, u8 *smt_idx);
-
+int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
+void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
+int cxgb_open(struct net_device *dev);
+int cxgb_close(struct net_device *dev);
+void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
+void cxgb4_quiesce_rx(struct sge_rspq *q);
#endif /* __CXGB4_H__ */
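One reading of the sge_eosw_state lifecycle declared above (inferred from the comments in this patch, not a documented contract): an ETHOFLD queue must complete a FLOWC handshake before carrying traffic and another before teardown:

	CLOSED -> FLOWC_OPEN_SEND  -> FLOWC_OPEN_REPLY  -> ACTIVE
	ACTIVE -> FLOWC_CLOSE_SEND -> FLOWC_CLOSE_REPLY -> CLOSED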
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index ae6a47dd7dc9..a13b03f771cc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2658,6 +2658,7 @@ static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
+ int eth_entries, ctrl_entries, eo_entries = 0;
int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
@@ -2665,11 +2666,12 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
const struct sge_uld_rxq_info *urxq_info;
struct adapter *adap = seq->private;
int i, n, r = (uintptr_t)v - 1;
- int eth_entries, ctrl_entries;
struct sge *s = &adap->sge;
eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
+ if (adap->sge.eohw_txq)
+ eo_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
mutex_lock(&uld_mutex);
if (s->uld_txq_info)
@@ -2761,6 +2763,54 @@ do { \
}
r -= eth_entries;
+ if (r < eo_entries) {
+ int base_qset = r * 4;
+ const struct sge_ofld_rxq *rx = &s->eohw_rxq[base_qset];
+ const struct sge_eohw_txq *tx = &s->eohw_txq[base_qset];
+
+ n = min(4, s->eoqsets - 4 * r);
+
+ S("QType:", "ETHOFLD");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+ T("TxQ ID:", q.cntxt_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ CIDX:", q.cidx);
+ T("TxQ PIDX:", q.pidx);
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ S3("u", "FL size:", rx->fl.size ? rx->fl.size - 8 : 0);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxImm:", stats.imm);
+ RL("RxAN", stats.an);
+ RL("RxNoMem", stats.nomem);
+ TL("TSO:", tso);
+ TL("TxCSO:", tx_cso);
+ TL("VLANins:", vlan_ins);
+ TL("TxQFull:", q.stops);
+ TL("TxQRestarts:", q.restarts);
+ TL("TxMapErr:", mapping_err);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLMapErr:", fl.mapping_err);
+ RL("FLLow:", fl.low);
+ RL("FLStarving:", fl.starving);
+
+ goto unlock;
+ }
+
+ r -= eo_entries;
if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
const struct sge_uld_txq *tx;
@@ -3007,6 +3057,7 @@ static int sge_queue_entries(const struct adapter *adap)
mutex_unlock(&uld_mutex);
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
+ (adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) +
tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 43b0f8c57da7..1d39fca11810 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -440,36 +440,48 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
{
struct adapter *adap = netdev2adap(dev);
struct tid_info *t = &adap->tids;
+ bool found = false;
+ u8 i, n, cnt;
int ftid;
- spin_lock_bh(&t->ftid_lock);
- if (family == PF_INET) {
- ftid = find_first_zero_bit(t->ftid_bmap, t->nftids);
- if (ftid >= t->nftids)
- ftid = -1;
- } else {
- if (is_t6(adap->params.chip)) {
- ftid = bitmap_find_free_region(t->ftid_bmap,
- t->nftids, 1);
- if (ftid < 0)
- goto out_unlock;
-
- /* this is only a lookup, keep the found region
- * unallocated
- */
- bitmap_release_region(t->ftid_bmap, ftid, 1);
- } else {
- ftid = bitmap_find_free_region(t->ftid_bmap,
- t->nftids, 2);
- if (ftid < 0)
- goto out_unlock;
+ /* IPv4 occupies 1 slot. IPv6 occupies 2 slots on T6 and 4 slots
+ * on T5.
+ */
+ n = 1;
+ if (family == PF_INET6) {
+ n++;
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
+ n += 2;
+ }
+
+ if (n > t->nftids)
+ return -ENOMEM;
- bitmap_release_region(t->ftid_bmap, ftid, 2);
+ /* Find free filter slots from the end of the TCAM. Appropriate
+ * checks must be done by the caller later to ensure the prio
+ * passed by TC doesn't conflict with the prio saved by existing
+ * rules in the TCAM.
+ */
+ spin_lock_bh(&t->ftid_lock);
+ ftid = t->nftids - 1;
+ while (ftid >= n - 1) {
+ cnt = 0;
+ for (i = 0; i < n; i++) {
+ if (test_bit(ftid - i, t->ftid_bmap))
+ break;
+ cnt++;
}
+ if (cnt == n) {
+ ftid &= ~(n - 1);
+ found = true;
+ break;
+ }
+
+ ftid -= n;
}
-out_unlock:
spin_unlock_bh(&t->ftid_lock);
- return ftid;
+
+ return found ? ftid : -ENOMEM;
}
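A worked example of the slot arithmetic in cxgb4_get_free_ftid() above, assuming a T5 adapter (so an IPv6 filter needs n = 4 contiguous TCAM slots):

	n = 4;			/* 1 base + 1 IPv6 + 2 extra below T6 */
	ftid = 7;		/* bits 7, 6, 5, 4 all tested free */
	ftid &= ~(n - 1);	/* align down: base slot becomes 4 */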
static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
@@ -510,6 +522,60 @@ static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
spin_unlock_bh(&t->ftid_lock);
}
+bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct filter_entry *prev_fe, *next_fe;
+ struct tid_info *t = &adap->tids;
+ u32 prev_ftid, next_ftid;
+ bool valid = true;
+
+ /* Only insert the rule if both of the following conditions
+ * are met:
+ * 1. The immediate previous rule has priority <= @prio.
+ * 2. The immediate next rule has priority >= @prio.
+ */
+ spin_lock_bh(&t->ftid_lock);
+ /* Don't insert if there's a rule already present at @idx. */
+ if (test_bit(idx, t->ftid_bmap)) {
+ valid = false;
+ goto out_unlock;
+ }
+
+ next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx);
+ if (next_ftid >= t->nftids)
+ next_ftid = idx;
+
+ next_fe = &adap->tids.ftid_tab[next_ftid];
+
+ prev_ftid = find_last_bit(t->ftid_bmap, idx);
+ if (prev_ftid >= idx)
+ prev_ftid = idx;
+
+ /* See if the filter entry belongs to an IPv6 rule, which
+ * occupies 4 slots on T5 and 2 slots on T6. Adjust the
+ * reference to the previously inserted filter entry
+ * accordingly.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) {
+ prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x3];
+ if (!prev_fe->fs.type)
+ prev_fe = &adap->tids.ftid_tab[prev_ftid];
+ } else {
+ prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x1];
+ if (!prev_fe->fs.type)
+ prev_fe = &adap->tids.ftid_tab[prev_ftid];
+ }
+
+ if ((prev_fe->valid && prio < prev_fe->fs.tc_prio) ||
+ (next_fe->valid && prio > next_fe->fs.tc_prio))
+ valid = false;
+
+out_unlock:
+ spin_unlock_bh(&t->ftid_lock);
+ return valid;
+}
+
/* Delete the filter at a specified index. */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
@@ -806,6 +872,12 @@ static void fill_default_mask(struct ch_filter_specification *fs)
fs->mask.tos |= ~0;
if (fs->val.proto && !fs->mask.proto)
fs->mask.proto |= ~0;
+ if (fs->val.pfvf_vld && !fs->mask.pfvf_vld)
+ fs->mask.pfvf_vld |= ~0;
+ if (fs->val.pf && !fs->mask.pf)
+ fs->mask.pf |= ~0;
+ if (fs->val.vf && !fs->mask.vf)
+ fs->mask.vf |= ~0;
for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
lip |= fs->val.lip[i];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
index b0751c0611ec..b3e4a645043d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
@@ -53,4 +53,5 @@ void clear_all_filters(struct adapter *adapter);
void init_hash_filter(struct adapter *adap);
bool is_filter_exact_match(struct adapter *adap,
struct ch_filter_specification *fs);
+bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio);
#endif /* __CXGB4_FILTER_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 38024877751c..e8a1826a1e90 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -65,6 +65,7 @@
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
+#include <net/xfrm.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
@@ -82,6 +83,8 @@
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
+#include "cxgb4_tc_mqprio.h"
+#include "cxgb4_tc_matchall.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"
@@ -184,6 +187,8 @@ static struct dentry *cxgb4_debugfs_root;
LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
+static int cfg_queues(struct adapter *adap);
+
static void link_report(struct net_device *dev)
{
if (!netif_carrier_ok(dev))
@@ -683,31 +688,6 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie)
return IRQ_HANDLED;
}
-/*
- * Name the MSI-X interrupts.
- */
-static void name_msix_vecs(struct adapter *adap)
-{
- int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
-
- /* non-data interrupts */
- snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
-
- /* FW events */
- snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
- adap->port[0]->name);
-
- /* Ethernet queues */
- for_each_port(adap, j) {
- struct net_device *d = adap->port[j];
- const struct port_info *pi = netdev_priv(d);
-
- for (i = 0; i < pi->nqsets; i++, msi_idx++)
- snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
- d->name, i);
- }
-}
-
int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
cpumask_var_t *aff_mask, int idx)
{
@@ -741,15 +721,19 @@ static int request_msix_queue_irqs(struct adapter *adap)
struct sge *s = &adap->sge;
struct msix_info *minfo;
int err, ethqidx;
- int msi_index = 2;
- err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
- adap->msix_info[1].desc, &s->fw_evtq);
+ if (s->fwevtq_msix_idx < 0)
+ return -ENOMEM;
+
+ err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info[s->fwevtq_msix_idx].desc,
+ &s->fw_evtq);
if (err)
return err;
for_each_ethrxq(s, ethqidx) {
- minfo = &adap->msix_info[msi_index];
+ minfo = s->ethrxq[ethqidx].msix;
err = request_irq(minfo->vec,
t4_sge_intr_msix, 0,
minfo->desc,
@@ -759,18 +743,16 @@ static int request_msix_queue_irqs(struct adapter *adap)
cxgb4_set_msix_aff(adap, minfo->vec,
&minfo->aff_mask, ethqidx);
- msi_index++;
}
return 0;
unwind:
while (--ethqidx >= 0) {
- msi_index--;
- minfo = &adap->msix_info[msi_index];
+ minfo = s->ethrxq[ethqidx].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
}
- free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+ free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
return err;
}
@@ -778,11 +760,11 @@ static void free_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
struct msix_info *minfo;
- int i, msi_index = 2;
+ int i;
- free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+ free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
for_each_ethrxq(s, i) {
- minfo = &adap->msix_info[msi_index++];
+ minfo = s->ethrxq[i].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_irq(minfo->vec, &s->ethrxq[i].rspq);
}
@@ -899,6 +881,12 @@ static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
+void cxgb4_quiesce_rx(struct sge_rspq *q)
+{
+ if (q->handler)
+ napi_disable(&q->napi);
+}
+
/*
* Wait until all NAPI handlers are descheduled.
*/
@@ -909,19 +897,24 @@ static void quiesce_rx(struct adapter *adap)
for (i = 0; i < adap->sge.ingr_sz; i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
- if (q && q->handler)
- napi_disable(&q->napi);
+ if (!q)
+ continue;
+
+ cxgb4_quiesce_rx(q);
}
}
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
+ struct sge *s = &adap->sge;
+
if (adap->flags & CXGB4_FULL_INIT_DONE) {
t4_intr_disable(adap);
if (adap->flags & CXGB4_USING_MSIX) {
free_msix_queue_irqs(adap);
- free_irq(adap->msix_info[0].vec, adap);
+ free_irq(adap->msix_info[s->nd_msix_idx].vec,
+ adap);
} else {
free_irq(adap->pdev->irq, adap);
}
@@ -929,6 +922,17 @@ static void disable_interrupts(struct adapter *adap)
}
}
+void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
+{
+ if (q->handler)
+ napi_enable(&q->napi);
+
+ /* 0-increment GTS to start the timer and enable interrupts */
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+ SEINTARM_V(q->intr_params) |
+ INGRESSQID_V(q->cntxt_id));
+}
+
/*
* Enable NAPI scheduling and interrupt generation for all Rx queues.
*/
@@ -941,37 +945,63 @@ static void enable_rx(struct adapter *adap)
if (!q)
continue;
- if (q->handler)
- napi_enable(&q->napi);
- /* 0-increment GTS to start the timer and enable interrupts */
- t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
- SEINTARM_V(q->intr_params) |
- INGRESSQID_V(q->cntxt_id));
+ cxgb4_enable_rx(adap, q);
}
}
+static int setup_non_data_intr(struct adapter *adap)
+{
+ int msix;
+
+ adap->sge.nd_msix_idx = -1;
+ if (!(adap->flags & CXGB4_USING_MSIX))
+ return 0;
+
+ /* Request MSI-X vector for non-data interrupt */
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msix < 0)
+ return -ENOMEM;
+
+ snprintf(adap->msix_info[msix].desc,
+ sizeof(adap->msix_info[msix].desc),
+ "%s", adap->port[0]->name);
+
+ adap->sge.nd_msix_idx = msix;
+ return 0;
+}
static int setup_fw_sge_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err = 0;
+ int msix, err = 0;
bitmap_zero(s->starving_fl, s->egr_sz);
bitmap_zero(s->txq_maperr, s->egr_sz);
- if (adap->flags & CXGB4_USING_MSIX)
- adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */
- else {
+ if (adap->flags & CXGB4_USING_MSIX) {
+ s->fwevtq_msix_idx = -1;
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msix < 0)
+ return -ENOMEM;
+
+ snprintf(adap->msix_info[msix].desc,
+ sizeof(adap->msix_info[msix].desc),
+ "%s-FWeventq", adap->port[0]->name);
+ } else {
err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
NULL, NULL, NULL, -1);
if (err)
return err;
- adap->msi_idx = -((int)s->intrq.abs_id + 1);
+ msix = -((int)s->intrq.abs_id + 1);
}
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
- adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
+ msix, NULL, fwevtq_handler, NULL, -1);
+ if (err && msix >= 0)
+ cxgb4_free_msix_idx_in_bmap(adap, msix);
+
+ s->fwevtq_msix_idx = msix;
return err;
}
@@ -985,14 +1015,17 @@ static int setup_fw_sge_queues(struct adapter *adap)
*/
static int setup_sge_queues(struct adapter *adap)
{
- int err, i, j;
- struct sge *s = &adap->sge;
struct sge_uld_rxq_info *rxq_info = NULL;
+ struct sge *s = &adap->sge;
unsigned int cmplqid = 0;
+ int err, i, j, msix = 0;
if (is_uld(adap))
rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ if (!(adap->flags & CXGB4_USING_MSIX))
+ msix = -((int)s->intrq.abs_id + 1);
+
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
struct port_info *pi = netdev_priv(dev);
@@ -1000,10 +1033,21 @@ static int setup_sge_queues(struct adapter *adap)
struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
for (j = 0; j < pi->nqsets; j++, q++) {
- if (adap->msi_idx > 0)
- adap->msi_idx++;
+ if (msix >= 0) {
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msix < 0) {
+ err = msix;
+ goto freeout;
+ }
+
+ snprintf(adap->msix_info[msix].desc,
+ sizeof(adap->msix_info[msix].desc),
+ "%s-Rx%d", dev->name, j);
+ q->msix = &adap->msix_info[msix];
+ }
+
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
- adap->msi_idx, &q->fl,
+ msix, &q->fl,
t4_ethrx_handler,
NULL,
t4_get_tp_ch_map(adap,
@@ -1090,6 +1134,18 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
}
#endif /* CONFIG_CHELSIO_T4_DCB */
+ if (dev->num_tc) {
+ struct port_info *pi = netdev2pinfo(dev);
+
+ /* Send unsupported traffic patterns to the normal NIC queues. */
+ txq = netdev_pick_tx(dev, skb, sb_dev);
+ if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
+ ip_hdr(skb)->protocol != IPPROTO_TCP)
+ txq = txq % pi->nqsets;
+
+ return txq;
+ }
+
if (select_queue) {
txq = (skb_rx_queue_recorded(skb)
? skb_get_rx_queue(skb)
@@ -1456,19 +1512,23 @@ static int tid_init(struct tid_info *t)
struct adapter *adap = container_of(t, struct adapter, tids);
unsigned int max_ftids = t->nftids + t->nsftids;
unsigned int natids = t->natids;
+ unsigned int eotid_bmap_size;
unsigned int stid_bmap_size;
unsigned int ftid_bmap_size;
size_t size;
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
ftid_bmap_size = BITS_TO_LONGS(t->nftids);
+ eotid_bmap_size = BITS_TO_LONGS(t->neotids);
size = t->ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
t->nsftids * sizeof(*t->stid_tab) +
stid_bmap_size * sizeof(long) +
max_ftids * sizeof(*t->ftid_tab) +
- ftid_bmap_size * sizeof(long);
+ ftid_bmap_size * sizeof(long) +
+ t->neotids * sizeof(*t->eotid_tab) +
+ eotid_bmap_size * sizeof(long);
t->tid_tab = kvzalloc(size, GFP_KERNEL);
if (!t->tid_tab)
@@ -1479,6 +1539,8 @@ static int tid_init(struct tid_info *t)
t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
+ t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
+ t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
spin_lock_init(&t->ftid_lock);
@@ -1505,6 +1567,9 @@ static int tid_init(struct tid_info *t)
if (!t->stid_base &&
CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
__set_bit(0, t->stid_bmap);
+
+ if (t->neotids)
+ bitmap_zero(t->eotid_bmap, t->neotids);
}
bitmap_zero(t->ftid_bmap, t->nftids);
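The tid_init() changes above grow the single kvzalloc() so it also holds the new EOTID table and bitmap; every table is carved back to back out of one zeroed allocation, so a single alloc/free pair covers them all. A small userspace sketch of that carving pattern, with illustrative sizes:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t ntids = 16, neotids = 8;
	size_t size = ntids * sizeof(void *) +		/* tid table */
		      neotids * sizeof(void *) +	/* eotid table */
		      sizeof(unsigned long);		/* eotid bitmap word */
	char *base = calloc(1, size);			/* kvzalloc() analogue */
	void **tid_tab, **eotid_tab;
	unsigned long *eotid_bmap;

	if (!base)
		return 1;

	/* Carve the sub-tables out of the one allocation, back to back */
	tid_tab = (void **)base;
	eotid_tab = &tid_tab[ntids];
	eotid_bmap = (unsigned long *)&eotid_tab[neotids];

	printf("tid_tab=%p eotid_tab=%p eotid_bmap=%p\n",
	       (void *)tid_tab, (void *)eotid_tab, (void *)eotid_bmap);
	free(base);					/* frees all three tables */
	return 0;
}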
@@ -2361,6 +2426,7 @@ static void update_clip(const struct adapter *adap)
*/
static int cxgb_up(struct adapter *adap)
{
+ struct sge *s = &adap->sge;
int err;
mutex_lock(&uld_mutex);
@@ -2372,16 +2438,20 @@ static int cxgb_up(struct adapter *adap)
goto freeq;
if (adap->flags & CXGB4_USING_MSIX) {
- name_msix_vecs(adap);
- err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
- adap->msix_info[0].desc, adap);
+ if (s->nd_msix_idx < 0) {
+ err = -ENOMEM;
+ goto irq_err;
+ }
+
+ err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
+ t4_nondata_intr, 0,
+ adap->msix_info[s->nd_msix_idx].desc, adap);
if (err)
goto irq_err;
+
err = request_msix_queue_irqs(adap);
- if (err) {
- free_irq(adap->msix_info[0].vec, adap);
- goto irq_err;
- }
+ if (err)
+ goto irq_err_free_nd_msix;
} else {
err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
(adap->flags & CXGB4_USING_MSI) ? 0
@@ -2403,11 +2473,13 @@ static int cxgb_up(struct adapter *adap)
#endif
return err;
- irq_err:
+irq_err_free_nd_msix:
+ free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
+irq_err:
dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
- freeq:
+freeq:
t4_free_sge_resources(adap);
- rel_lock:
+rel_lock:
mutex_unlock(&uld_mutex);
return err;
}
@@ -2429,11 +2501,11 @@ static void cxgb_down(struct adapter *adapter)
/*
* net_device operations
*/
-static int cxgb_open(struct net_device *dev)
+int cxgb_open(struct net_device *dev)
{
- int err;
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
+ int err;
netif_carrier_off(dev);
@@ -2456,7 +2528,7 @@ static int cxgb_open(struct net_device *dev)
return err;
}
-static int cxgb_close(struct net_device *dev)
+int cxgb_close(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -3163,8 +3235,33 @@ static int cxgb_setup_tc_cls_u32(struct net_device *dev,
}
}
-static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
+static int cxgb_setup_tc_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ if (!adap->tc_matchall)
+ return -ENOMEM;
+
+ switch (cls_matchall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
+ case TC_CLSMATCHALL_DESTROY:
+ return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
+ case TC_CLSMATCHALL_STATS:
+ if (ingress)
+ return cxgb4_tc_matchall_stats(dev, cls_matchall);
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
struct net_device *dev = cb_priv;
struct port_info *pi = netdev2pinfo(dev);
@@ -3185,24 +3282,81 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return cxgb_setup_tc_cls_u32(dev, type_data);
case TC_SETUP_CLSFLOWER:
return cxgb_setup_tc_flower(dev, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return cxgb_setup_tc_matchall(dev, type_data, true);
default:
return -EOPNOTSUPP;
}
}
+static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct net_device *dev = cb_priv;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
+ dev_err(adap->pdev_dev,
+ "Failed to setup tc on port %d. Link Down?\n",
+ pi->port_id);
+ return -EINVAL;
+ }
+
+ if (!tc_cls_can_offload_and_chain0(dev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return cxgb_setup_tc_matchall(dev, type_data, false);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int cxgb_setup_tc_mqprio(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ if (!is_ethofld(adap) || !adap->tc_mqprio)
+ return -ENOMEM;
+
+ return cxgb4_setup_tc_mqprio(dev, mqprio);
+}
+
static LIST_HEAD(cxgb_block_cb_list);
+static int cxgb_setup_tc_block(struct net_device *dev,
+ struct flow_block_offload *f)
+{
+ struct port_info *pi = netdev_priv(dev);
+ flow_setup_cb_t *cb;
+ bool ingress_only;
+
+ pi->tc_block_shared = f->block_shared;
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = cxgb_setup_tc_block_egress_cb;
+ ingress_only = false;
+ } else {
+ cb = cxgb_setup_tc_block_ingress_cb;
+ ingress_only = true;
+ }
+
+ return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
+ cb, pi, dev, ingress_only);
+}
+
static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- struct port_info *pi = netdev2pinfo(dev);
-
switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return cxgb_setup_tc_mqprio(dev, type_data);
case TC_SETUP_BLOCK:
- return flow_block_cb_setup_simple(type_data,
- &cxgb_block_cb_list,
- cxgb_setup_tc_block_cb,
- pi, dev, true);
+ return cxgb_setup_tc_block(dev, type_data);
default:
return -EOPNOTSUPP;
}
@@ -4286,14 +4440,14 @@ static struct fw_info *find_fw_info(int chip)
/*
* Phase 0 of initialization: contact FW, obtain config, perform basic init.
*/
-static int adap_init0(struct adapter *adap)
+static int adap_init0(struct adapter *adap, int vpd_skip)
{
- int ret;
- u32 v, port_vec;
- enum dev_state state;
- u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
+ u32 params[7], val[7];
+ enum dev_state state;
+ u32 v, port_vec;
int reset = 1;
+ int ret;
/* Grab Firmware Device Log parameters as early as possible so we have
* access to it for debugging, etc.
@@ -4448,9 +4602,11 @@ static int adap_init0(struct adapter *adap)
* could have FLASHed a new VPD which won't be read by the firmware
* until we do the RESET ...
*/
- ret = t4_get_vpd_params(adap, &adap->params.vpd);
- if (ret < 0)
- goto bye;
+ if (!vpd_skip) {
+ ret = t4_get_vpd_params(adap, &adap->params.vpd);
+ if (ret < 0)
+ goto bye;
+ }
/* Find out what ports are available to us. Note that we need to do
* this before calling adap_init0_no_config() since it needs nports
@@ -4600,11 +4756,18 @@ static int adap_init0(struct adapter *adap)
adap->clipt_start = val[0];
adap->clipt_end = val[1];
- /* We don't yet have a PARAMs calls to retrieve the number of Traffic
- * Classes supported by the hardware/firmware so we hard code it here
- * for now.
- */
- adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
+ /* Get the supported number of traffic classes */
+ params[0] = FW_PARAM_DEV(NUM_TM_CLASS);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
+ if (ret < 0) {
+ /* We couldn't retrieve the number of Traffic Classes
+ * supported by the hardware/firmware, so we hard-code
+ * it here.
+ */
+ adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
+ } else {
+ adap->params.nsched_cls = val[0];
+ }
/* query params related to active filter region */
params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
@@ -4689,7 +4852,8 @@ static int adap_init0(struct adapter *adap)
adap->params.offload = 1;
if (caps_cmd.ofldcaps ||
- (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
+ (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) ||
+ (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) {
/* query offload-related parameters */
params[0] = FW_PARAM_DEV(NTID);
params[1] = FW_PARAM_PFVF(SERVER_START);
@@ -4731,6 +4895,19 @@ static int adap_init0(struct adapter *adap)
} else {
adap->num_ofld_uld += 1;
}
+
+ if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) {
+ params[0] = FW_PARAM_PFVF(ETHOFLD_START);
+ params[1] = FW_PARAM_PFVF(ETHOFLD_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
+ params, val);
+ if (!ret) {
+ adap->tids.eotid_base = val[0];
+ adap->tids.neotids = min_t(u32, MAX_ATIDS,
+ val[1] - val[0] + 1);
+ adap->params.ethofld = 1;
+ }
+ }
}
if (caps_cmd.rdmacaps) {
params[0] = FW_PARAM_PFVF(STAG_START);
@@ -5050,10 +5227,93 @@ static void eeh_resume(struct pci_dev *pdev)
rtnl_unlock();
}
+static void eeh_reset_prepare(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+ int i;
+
+ if (adapter->pf != 4)
+ return;
+
+ adapter->flags &= ~CXGB4_FW_OK;
+
+ notify_ulds(adapter, CXGB4_STATE_DOWN);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ cxgb_close(adapter->port[i]);
+
+ disable_interrupts(adapter);
+ cxgb4_free_mps_ref_entries(adapter);
+
+ adap_free_hma_mem(adapter);
+
+ if (adapter->flags & CXGB4_FULL_INIT_DONE)
+ cxgb_down(adapter);
+}
+
+static void eeh_reset_done(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+ int err, i;
+
+ if (adapter->pf != 4)
+ return;
+
+ err = t4_wait_dev_ready(adapter->regs);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev,
+ "Device not ready, err %d", err);
+ return;
+ }
+
+ setup_memwin(adapter);
+
+ err = adap_init0(adapter, 1);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Adapter init failed, err %d", err);
+ return;
+ }
+
+ setup_memwin_rdma(adapter);
+
+ if (adapter->flags & CXGB4_FW_OK) {
+ err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Port init failed, err %d", err);
+ return;
+ }
+ }
+
+ err = cfg_queues(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Config queues failed, err %d", err);
+ return;
+ }
+
+ cxgb4_init_mps_ref_entries(adapter);
+
+ err = setup_fw_sge_queues(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "FW sge queue allocation failed, err %d", err);
+ return;
+ }
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ cxgb_open(adapter->port[i]);
+}
+
static const struct pci_error_handlers cxgb4_eeh = {
.error_detected = eeh_err_detected,
.slot_reset = eeh_slot_reset,
.resume = eeh_resume,
+ .reset_prepare = eeh_reset_prepare,
+ .reset_done = eeh_reset_done,
};
/* Return true if the Link Configuration supports "High Speeds" (those greater
@@ -5070,26 +5330,25 @@ static inline bool is_x_10g_port(const struct link_config *lc)
return high_speeds != 0;
}
-/*
- * Perform default configuration of DMA queues depending on the number and type
+/* Perform default configuration of DMA queues depending on the number and type
* of ports we found and the number of available CPUs. Most settings can be
* modified by the admin prior to actual use.
*/
static int cfg_queues(struct adapter *adap)
{
+ u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
+ u32 niqflint, neq, num_ulds;
struct sge *s = &adap->sge;
- int i, n10g = 0, qidx = 0;
- int niqflint, neq, avail_eth_qsets;
- int max_eth_qsets = 32;
+ u32 i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
int q10g = 0;
#endif
- /* Reduce memory usage in kdump environment, disable all offload.
- */
+ /* Reduce memory usage in kdump environment, disable all offload. */
if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
adap->params.offload = 0;
adap->params.crypto = 0;
+ adap->params.ethofld = 0;
}
/* Calculate the number of Ethernet Queue Sets available based on
@@ -5108,14 +5367,11 @@ static int cfg_queues(struct adapter *adap)
if (!(adap->flags & CXGB4_USING_MSIX))
niqflint--;
neq = adap->params.pfres.neq / 2;
- avail_eth_qsets = min(niqflint, neq);
+ avail_qsets = min(niqflint, neq);
- if (avail_eth_qsets > max_eth_qsets)
- avail_eth_qsets = max_eth_qsets;
-
- if (avail_eth_qsets < adap->params.nports) {
+ if (avail_qsets < adap->params.nports) {
dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
- avail_eth_qsets, adap->params.nports);
+ avail_qsets, adap->params.nports);
return -ENOMEM;
}
@@ -5123,6 +5379,7 @@ static int cfg_queues(struct adapter *adap)
for_each_port(adap, i)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+ avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
@@ -5142,8 +5399,7 @@ static int cfg_queues(struct adapter *adap)
qidx += pi->nqsets;
}
#else /* !CONFIG_CHELSIO_T4_DCB */
- /*
- * We default to 1 queue per non-10G port and up to # of cores queues
+ /* We default to 1 queue per non-10G port and up to # of cores queues
* per 10G port.
*/
if (n10g)
@@ -5165,19 +5421,40 @@ static int cfg_queues(struct adapter *adap)
s->ethqsets = qidx;
s->max_ethqsets = qidx; /* MSI-X may lower it later */
+ avail_qsets -= qidx;
if (is_uld(adap)) {
- /*
- * For offload we use 1 queue/channel if all ports are up to 1G,
+ /* For offload we use 1 queue/channel if all ports are up to 1G,
* otherwise we divide all available queues amongst the channels
* capped by the number of available cores.
*/
- if (n10g) {
- i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
- s->ofldqsets = roundup(i, adap->params.nports);
- } else {
+ num_ulds = adap->num_uld + adap->num_ofld_uld;
+ i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus());
+ avail_uld_qsets = roundup(i, adap->params.nports);
+ if (avail_qsets < num_ulds * adap->params.nports) {
+ adap->params.offload = 0;
+ adap->params.crypto = 0;
+ s->ofldqsets = 0;
+ } else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) {
s->ofldqsets = adap->params.nports;
+ } else {
+ s->ofldqsets = avail_uld_qsets;
+ }
+
+ avail_qsets -= num_ulds * s->ofldqsets;
+ }
+
+ /* ETHOFLD queues used for QoS offload should follow the same
+ * allocation scheme as the normal Ethernet queues.
+ */
+ if (is_ethofld(adap)) {
+ if (avail_qsets < s->max_ethqsets) {
+ adap->params.ethofld = 0;
+ s->eoqsets = 0;
+ } else {
+ s->eoqsets = s->max_ethqsets;
}
+ avail_qsets -= s->eoqsets;
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -5230,42 +5507,62 @@ static void reduce_ethqs(struct adapter *adap, int n)
}
}
-static int get_msix_info(struct adapter *adap)
+static int alloc_msix_info(struct adapter *adap, u32 num_vec)
{
- struct uld_msix_info *msix_info;
- unsigned int max_ingq = 0;
-
- if (is_offload(adap))
- max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
- if (is_pci_uld(adap))
- max_ingq += MAX_OFLD_QSETS * adap->num_uld;
-
- if (!max_ingq)
- goto out;
+ struct msix_info *msix_info;
- msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
+ msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL);
if (!msix_info)
return -ENOMEM;
- adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
- sizeof(long), GFP_KERNEL);
- if (!adap->msix_bmap_ulds.msix_bmap) {
+ adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->msix_bmap.msix_bmap) {
kfree(msix_info);
return -ENOMEM;
}
- spin_lock_init(&adap->msix_bmap_ulds.lock);
- adap->msix_info_ulds = msix_info;
-out:
+
+ spin_lock_init(&adap->msix_bmap.lock);
+ adap->msix_bmap.mapsize = num_vec;
+
+ adap->msix_info = msix_info;
return 0;
}
static void free_msix_info(struct adapter *adap)
{
- if (!(adap->num_uld && adap->num_ofld_uld))
- return;
+ kfree(adap->msix_bmap.msix_bmap);
+ kfree(adap->msix_info);
+}
- kfree(adap->msix_info_ulds);
- kfree(adap->msix_bmap_ulds.msix_bmap);
+int cxgb4_get_msix_idx_from_bmap(struct adapter *adap)
+{
+ struct msix_bmap *bmap = &adap->msix_bmap;
+ unsigned int msix_idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bmap->lock, flags);
+ msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
+ if (msix_idx < bmap->mapsize) {
+ __set_bit(msix_idx, bmap->msix_bmap);
+ } else {
+ spin_unlock_irqrestore(&bmap->lock, flags);
+ return -ENOSPC;
+ }
+
+ spin_unlock_irqrestore(&bmap->lock, flags);
+ return msix_idx;
+}
+
+void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
+ unsigned int msix_idx)
+{
+ struct msix_bmap *bmap = &adap->msix_bmap;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bmap->lock, flags);
+ __clear_bit(msix_idx, bmap->msix_bmap);
+ spin_unlock_irqrestore(&bmap->lock, flags);
}
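The two helpers above replace the fixed vector numbering with a bitmap allocator: find the first zero bit under a lock and set it on allocation, clear it on free. A minimal userspace analogue of the pattern, using a plain array and a pthread mutex instead of kernel bitops:

#include <pthread.h>
#include <stdio.h>

#define MAP_SIZE 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char bmap[MAP_SIZE];	/* 0 = free, 1 = in use */

static int get_idx(void)
{
	int i, ret = -1;	/* -1 mirrors -ENOSPC */

	pthread_mutex_lock(&lock);
	for (i = 0; i < MAP_SIZE; i++) {
		if (!bmap[i]) {
			bmap[i] = 1;
			ret = i;
			break;
		}
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

static void free_idx(int idx)
{
	pthread_mutex_lock(&lock);
	bmap[idx] = 0;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int a = get_idx(), b = get_idx();

	printf("allocated %d and %d\n", a, b);	/* 0 and 1 */
	free_idx(a);
	printf("reallocated %d\n", get_idx());	/* 0 again */
	return 0;
}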
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
@@ -5273,88 +5570,161 @@ static void free_msix_info(struct adapter *adap)
static int enable_msix(struct adapter *adap)
{
- int ofld_need = 0, uld_need = 0;
- int i, j, want, need, allocated;
+ u32 eth_need, uld_need = 0, ethofld_need = 0;
+ u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0;
+ u8 num_uld = 0, nchan = adap->params.nports;
+ u32 i, want, need, num_vec;
struct sge *s = &adap->sge;
- unsigned int nchan = adap->params.nports;
struct msix_entry *entries;
- int max_ingq = MAX_INGQ;
-
- if (is_pci_uld(adap))
- max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
- if (is_offload(adap))
- max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
- entries = kmalloc_array(max_ingq + 1, sizeof(*entries),
- GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- /* map for msix */
- if (get_msix_info(adap)) {
- adap->params.offload = 0;
- adap->params.crypto = 0;
- }
-
- for (i = 0; i < max_ingq + 1; ++i)
- entries[i].entry = i;
+ struct port_info *pi;
+ int allocated, ret;
- want = s->max_ethqsets + EXTRA_VECS;
- if (is_offload(adap)) {
- want += adap->num_ofld_uld * s->ofldqsets;
- ofld_need = adap->num_ofld_uld * nchan;
- }
- if (is_pci_uld(adap)) {
- want += adap->num_uld * s->ofldqsets;
- uld_need = adap->num_uld * nchan;
- }
+ want = s->max_ethqsets;
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
* each port.
*/
- need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
+ need = 8 * nchan;
#else
- need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
+ need = nchan;
#endif
+ eth_need = need;
+ if (is_uld(adap)) {
+ num_uld = adap->num_ofld_uld + adap->num_uld;
+ want += num_uld * s->ofldqsets;
+ uld_need = num_uld * nchan;
+ need += uld_need;
+ }
+
+ if (is_ethofld(adap)) {
+ want += s->eoqsets;
+ ethofld_need = eth_need;
+ need += ethofld_need;
+ }
+
+ want += EXTRA_VECS;
+ need += EXTRA_VECS;
+
+ entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ for (i = 0; i < want; i++)
+ entries[i].entry = i;
+
allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
if (allocated < 0) {
- dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
- " not using MSI-X\n");
- kfree(entries);
- return allocated;
+ /* Disable offload and attempt to get vectors for NIC
+ * only mode.
+ */
+ want = s->max_ethqsets + EXTRA_VECS;
+ need = eth_need + EXTRA_VECS;
+ allocated = pci_enable_msix_range(adap->pdev, entries,
+ need, want);
+ if (allocated < 0) {
+ dev_info(adap->pdev_dev,
+ "Disabling MSI-X due to insufficient MSI-X vectors\n");
+ ret = allocated;
+ goto out_free;
+ }
+
+ dev_info(adap->pdev_dev,
+ "Disabling offload due to insufficient MSI-X vectors\n");
+ adap->params.offload = 0;
+ adap->params.crypto = 0;
+ adap->params.ethofld = 0;
+ s->ofldqsets = 0;
+ s->eoqsets = 0;
+ uld_need = 0;
+ ethofld_need = 0;
+ }
+
+ num_vec = allocated;
+ if (num_vec < want) {
+ /* Distribute available vectors to the various queue groups.
+ * Every group gets its minimum requirement and NIC gets top
+ * priority for leftovers.
+ */
+ ethqsets = eth_need;
+ if (is_uld(adap))
+ ofldqsets = nchan;
+ if (is_ethofld(adap))
+ eoqsets = ethofld_need;
+
+ num_vec -= need;
+ while (num_vec) {
+ if (num_vec < eth_need + ethofld_need ||
+ ethqsets > s->max_ethqsets)
+ break;
+
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->nqsets < 2)
+ continue;
+
+ ethqsets++;
+ num_vec--;
+ if (ethofld_need) {
+ eoqsets++;
+ num_vec--;
+ }
+ }
+ }
+
+ if (is_uld(adap)) {
+ while (num_vec) {
+ if (num_vec < uld_need ||
+ ofldqsets > s->ofldqsets)
+ break;
+
+ ofldqsets++;
+ num_vec -= uld_need;
+ }
+ }
+ } else {
+ ethqsets = s->max_ethqsets;
+ if (is_uld(adap))
+ ofldqsets = s->ofldqsets;
+ if (is_ethofld(adap))
+ eoqsets = s->eoqsets;
}
- /* Distribute available vectors to the various queue groups.
- * Every group gets its minimum requirement and NIC gets top
- * priority for leftovers.
- */
- i = allocated - EXTRA_VECS - ofld_need - uld_need;
- if (i < s->max_ethqsets) {
- s->max_ethqsets = i;
- if (i < s->ethqsets)
- reduce_ethqs(adap, i);
+ if (ethqsets < s->max_ethqsets) {
+ s->max_ethqsets = ethqsets;
+ reduce_ethqs(adap, ethqsets);
}
+
if (is_uld(adap)) {
- if (allocated < want)
- s->nqs_per_uld = nchan;
- else
- s->nqs_per_uld = s->ofldqsets;
+ s->ofldqsets = ofldqsets;
+ s->nqs_per_uld = s->ofldqsets;
}
- for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
+ if (is_ethofld(adap))
+ s->eoqsets = eoqsets;
+
+ /* map for msix */
+ ret = alloc_msix_info(adap, allocated);
+ if (ret)
+ goto out_disable_msix;
+
+ for (i = 0; i < allocated; i++) {
adap->msix_info[i].vec = entries[i].vector;
- if (is_uld(adap)) {
- for (j = 0 ; i < allocated; ++i, j++) {
- adap->msix_info_ulds[j].vec = entries[i].vector;
- adap->msix_info_ulds[j].idx = i;
- }
- adap->msix_bmap_ulds.mapsize = j;
+ adap->msix_info[i].idx = i;
}
- dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
- "nic %d per uld %d\n",
- allocated, s->max_ethqsets, s->nqs_per_uld);
+
+ dev_info(adap->pdev_dev,
+ "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d\n",
+ allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld);
kfree(entries);
return 0;
+
+out_disable_msix:
+ pci_disable_msix(adap->pdev);
+
+out_free:
+ kfree(entries);
+ return ret;
}
#undef EXTRA_VECS
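The reworked enable_msix() above first asks for the full complement of vectors (NIC + ULD + ETHOFLD) and, when even the minimum can't be granted, retries with a NIC-only request and disables offload. A hedged sketch of that two-stage fallback; request_vectors() is a hypothetical stand-in for pci_enable_msix_range(), and the counts are illustrative:

#include <stdio.h>

static int request_vectors(int need, int want, int available)
{
	if (available < need)
		return -1;	/* mirrors a pci_enable_msix_range() failure */
	return available < want ? available : want;
}

int main(void)
{
	int want = 64, need = 20;		/* full offload ask */
	int nic_want = 34, nic_need = 4;	/* NIC-only fallback */
	int available = 16;			/* what the platform grants */
	int got = request_vectors(need, want, available);

	if (got < 0) {
		printf("disabling offload, retrying NIC-only\n");
		got = request_vectors(nic_need, nic_want, available);
	}
	printf("allocated %d vectors\n", got);
	return 0;
}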
@@ -5441,6 +5811,8 @@ static void free_some_resources(struct adapter *adapter)
kvfree(adapter->srq);
t4_cleanup_sched(adapter);
kvfree(adapter->tids.tid_tab);
+ cxgb4_cleanup_tc_matchall(adapter);
+ cxgb4_cleanup_tc_mqprio(adapter);
cxgb4_cleanup_tc_flower(adapter);
cxgb4_cleanup_tc_u32(adapter);
kfree(adapter->sge.egr_map);
@@ -5837,7 +6209,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
setup_memwin(adapter);
- err = adap_init0(adapter);
+ err = adap_init0(adapter, 0);
#ifdef CONFIG_DEBUG_FS
bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
@@ -5855,8 +6227,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&adapter->mac_hlist);
for_each_port(adapter, i) {
+ /* To support MQPRIO offload, we need some extra queues
+ * for the ETHOFLD TIDs. Keep their count equal to
+ * MAX_ATIDS for now. Once we connect to the firmware
+ * later and query the EOTID parameters, we'll know the
+ * actual number of EOTIDs supported.
+ */
netdev = alloc_etherdev_mq(sizeof(struct port_info),
- MAX_ETH_QSETS);
+ MAX_ETH_QSETS + MAX_ATIDS);
if (!netdev) {
err = -ENOMEM;
goto out_free_dev;
@@ -6004,6 +6382,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (cxgb4_init_tc_flower(adapter))
dev_warn(&pdev->dev,
"could not offload tc flower, continuing\n");
+
+ if (cxgb4_init_tc_mqprio(adapter))
+ dev_warn(&pdev->dev,
+ "could not offload tc mqprio, continuing\n");
+
+ if (cxgb4_init_tc_matchall(adapter))
+ dev_warn(&pdev->dev,
+ "could not offload tc matchall, continuing\n");
}
if (is_offload(adapter) || is_hashfilter(adapter)) {
@@ -6040,6 +6426,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_free_dev;
+ err = setup_non_data_intr(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Non Data interrupt allocation failed, err: %d\n", err);
+ goto out_free_dev;
+ }
+
err = setup_fw_sge_queues(adapter);
if (err) {
dev_err(adapter->pdev_dev,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index e447976bdd3e..0fa80bef575d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -378,15 +378,14 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
}
}
-static void cxgb4_process_flow_actions(struct net_device *in,
- struct flow_cls_offload *cls,
- struct ch_filter_specification *fs)
+void cxgb4_process_flow_actions(struct net_device *in,
+ struct flow_action *actions,
+ struct ch_filter_specification *fs)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_action_entry *act;
int i;
- flow_action_for_each(i, act, &rule->action) {
+ flow_action_for_each(i, act, actions) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
fs->action = FILTER_PASS;
@@ -544,17 +543,16 @@ static bool valid_pedit_action(struct net_device *dev,
return true;
}
-static int cxgb4_validate_flow_actions(struct net_device *dev,
- struct flow_cls_offload *cls)
+int cxgb4_validate_flow_actions(struct net_device *dev,
+ struct flow_action *actions)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_action_entry *act;
bool act_redir = false;
bool act_pedit = false;
bool act_vlan = false;
int i;
- flow_action_for_each(i, act, &rule->action) {
+ flow_action_for_each(i, act, actions) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
case FLOW_ACTION_DROP:
@@ -636,14 +634,15 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls)
{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
struct adapter *adap = netdev2adap(dev);
struct ch_tc_flower_entry *ch_flower;
struct ch_filter_specification *fs;
struct filter_ctx ctx;
- int fidx;
- int ret;
+ int fidx, ret;
- if (cxgb4_validate_flow_actions(dev, cls))
+ if (cxgb4_validate_flow_actions(dev, &rule->action))
return -EOPNOTSUPP;
if (cxgb4_validate_flow_match(dev, cls))
@@ -658,20 +657,41 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
fs = &ch_flower->fs;
fs->hitcnts = 1;
cxgb4_process_flow_match(dev, cls, fs);
- cxgb4_process_flow_actions(dev, cls, fs);
+ cxgb4_process_flow_actions(dev, &rule->action, fs);
fs->hash = is_filter_exact_match(adap, fs);
if (fs->hash) {
fidx = 0;
} else {
- fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
- if (fidx < 0) {
- netdev_err(dev, "%s: No fidx for offload.\n", __func__);
+ u8 inet_family;
+
+ inet_family = fs->type ? PF_INET6 : PF_INET;
+
+ /* Note that TC uses prio 0 to tell the stack to
+ * generate an automatic prio, and hence never passes
+ * prio 0 to the driver. However, the hardware TCAM
+ * index starts from 0; hence the -1 here.
+ */
+ if (cls->common.prio <= adap->tids.nftids)
+ fidx = cls->common.prio - 1;
+ else
+ fidx = cxgb4_get_free_ftid(dev, inet_family);
+
+ /* Only insert FLOWER rule if its priority doesn't
+ * conflict with existing rules in the LETCAM.
+ */
+ if (fidx < 0 ||
+ !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "No free LETCAM index available");
ret = -ENOMEM;
goto free_entry;
}
}
+ fs->tc_prio = cls->common.prio;
+ fs->tc_cookie = cls->cookie;
+
init_completion(&ctx.completion);
ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
if (ret) {
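The flower-replace change above maps a TC priority straight to a LETCAM index (prio - 1) whenever it fits within the filter region, and falls back to a first-free search otherwise. A minimal sketch of that mapping; get_free_ftid() is a hypothetical stand-in for cxgb4_get_free_ftid(), and the numbers are illustrative:

#include <stdio.h>

static int get_free_ftid(void)
{
	return 42;	/* pretend the allocator found a free slot */
}

static int pick_fidx(unsigned int prio, unsigned int nftids)
{
	if (prio <= nftids)
		return (int)prio - 1;	/* direct prio -> index mapping */
	return get_free_ftid();		/* first-free fallback */
}

int main(void)
{
	printf("prio 1 -> fidx %d\n", pick_fidx(1, 496));	/* 0 */
	printf("prio 500 -> fidx %d\n", pick_fidx(500, 496));	/* 42 */
	return 0;
}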
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
index eb4c95248baf..e132516e9868 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
@@ -108,6 +108,12 @@ struct ch_tc_pedit_fields {
#define PEDIT_TCP_SPORT_DPORT 0x0
#define PEDIT_UDP_SPORT_DPORT 0x0
+void cxgb4_process_flow_actions(struct net_device *in,
+ struct flow_action *actions,
+ struct ch_filter_specification *fs);
+int cxgb4_validate_flow_actions(struct net_device *dev,
+ struct flow_action *actions);
+
int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls);
int cxgb4_tc_flower_destroy(struct net_device *dev,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
new file mode 100644
index 000000000000..102b370fbd3e
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
+
+#include "cxgb4.h"
+#include "cxgb4_tc_matchall.h"
+#include "sched.h"
+#include "cxgb4_uld.h"
+#include "cxgb4_filter.h"
+#include "cxgb4_tc_flower.h"
+
+static int cxgb4_matchall_egress_validate(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action *actions = &cls->rule->action;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct flow_action_entry *entry;
+ u64 max_link_rate;
+ u32 i, speed;
+ int ret;
+
+ if (!flow_action_has_entries(actions)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress MATCHALL offload needs at least 1 policing action");
+ return -EINVAL;
+ } else if (!flow_offload_has_one_action(actions)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress MATCHALL offload only supports 1 policing action");
+ return -EINVAL;
+ } else if (pi->tc_block_shared) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress MATCHALL offload not supported with shared blocks");
+ return -EINVAL;
+ }
+
+ ret = t4_get_link_params(pi, NULL, &speed, NULL);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to get max speed supported by the link");
+ return -EINVAL;
+ }
+
+ /* Convert from Mbps to bps */
+ max_link_rate = (u64)speed * 1000 * 1000;
+
+ flow_action_for_each(i, entry, actions) {
+ switch (entry->id) {
+ case FLOW_ACTION_POLICE:
+ /* Convert bytes per second to bits per second */
+ if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Specified policing max rate is larger than underlying link speed");
+ return -ERANGE;
+ }
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only policing action supported with Egress MATCHALL offload");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
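cxgb4_matchall_egress_validate() above normalizes both sides to bits per second before comparing: the link speed arrives in Mbps and the policer rate in bytes per second. A worked example with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t speed_mbps = 10000;	/* 10G link */
	uint64_t max_link_rate = (uint64_t)speed_mbps * 1000 * 1000;
	uint64_t rate_bytes_ps = 2000000000ULL;	/* ~2 GB/s policer */

	/* 16e9 bps > 10e9 bps, so this policer would be rejected */
	if (rate_bytes_ps * 8 > max_link_rate)
		printf("rejected: %llu bps > link %llu bps\n",
		       (unsigned long long)(rate_bytes_ps * 8),
		       (unsigned long long)max_link_rate);
	return 0;
}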
+
+static int cxgb4_matchall_alloc_tc(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct ch_sched_params p = {
+ .type = SCHED_CLASS_TYPE_PACKET,
+ .u.params.level = SCHED_CLASS_LEVEL_CH_RL,
+ .u.params.mode = SCHED_CLASS_MODE_CLASS,
+ .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
+ .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
+ .u.params.class = SCHED_CLS_NONE,
+ .u.params.minrate = 0,
+ .u.params.weight = 0,
+ .u.params.pktsize = dev->mtu,
+ };
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct flow_action_entry *entry;
+ struct sched_class *e;
+ u32 i;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+
+ flow_action_for_each(i, entry, &cls->rule->action)
+ if (entry->id == FLOW_ACTION_POLICE)
+ break;
+
+ /* Convert from bytes per second to Kbps */
+ p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
+ p.u.params.channel = pi->tx_chan;
+ e = cxgb4_sched_class_alloc(dev, &p);
+ if (!e) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "No free traffic class available for policing action");
+ return -ENOMEM;
+ }
+
+ tc_port_matchall->egress.hwtc = e->idx;
+ tc_port_matchall->egress.cookie = cls->cookie;
+ tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
+ return 0;
+}
+
+static void cxgb4_matchall_free_tc(struct net_device *dev)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
+
+ tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
+ tc_port_matchall->egress.cookie = 0;
+ tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
+}
+
+static int cxgb4_matchall_alloc_filter(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct ch_filter_specification *fs;
+ int ret, fidx;
+
+ /* Note that TC uses prio 0 to tell the stack to generate
+ * an automatic prio, and hence never passes prio 0 to the
+ * driver. However, the hardware TCAM index starts from 0;
+ * hence the -1 here. 1 slot is enough to create a wildcard
+ * matchall VIID rule.
+ */
+ if (cls->common.prio <= adap->tids.nftids)
+ fidx = cls->common.prio - 1;
+ else
+ fidx = cxgb4_get_free_ftid(dev, PF_INET);
+
+ /* Only insert MATCHALL rule if its priority doesn't conflict
+ * with existing rules in the LETCAM.
+ */
+ if (fidx < 0 ||
+ !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "No free LETCAM index available");
+ return -ENOMEM;
+ }
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ fs = &tc_port_matchall->ingress.fs;
+ memset(fs, 0, sizeof(*fs));
+
+ fs->tc_prio = cls->common.prio;
+ fs->tc_cookie = cls->cookie;
+ fs->hitcnts = 1;
+
+ fs->val.pfvf_vld = 1;
+ fs->val.pf = adap->pf;
+ fs->val.vf = pi->vin;
+
+ cxgb4_process_flow_actions(dev, &cls->rule->action, fs);
+
+ ret = cxgb4_set_filter(dev, fidx, fs);
+ if (ret)
+ return ret;
+
+ tc_port_matchall->ingress.tid = fidx;
+ tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
+ return 0;
+}
+
+static int cxgb4_matchall_free_filter(struct net_device *dev)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ int ret;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+
+ ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid,
+ &tc_port_matchall->ingress.fs);
+ if (ret)
+ return ret;
+
+ tc_port_matchall->ingress.packets = 0;
+ tc_port_matchall->ingress.bytes = 0;
+ tc_port_matchall->ingress.last_used = 0;
+ tc_port_matchall->ingress.tid = 0;
+ tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
+ return 0;
+}
+
+int cxgb4_tc_matchall_replace(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress)
+{
+ struct netlink_ext_ack *extack = cls_matchall->common.extack;
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ int ret;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ if (ingress) {
+ if (tc_port_matchall->ingress.state ==
+ CXGB4_MATCHALL_STATE_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only 1 Ingress MATCHALL can be offloaded");
+ return -ENOMEM;
+ }
+
+ ret = cxgb4_validate_flow_actions(dev,
+ &cls_matchall->rule->action);
+ if (ret)
+ return ret;
+
+ return cxgb4_matchall_alloc_filter(dev, cls_matchall);
+ }
+
+ if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only 1 Egress MATCHALL can be offloaded");
+ return -ENOMEM;
+ }
+
+ ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
+ if (ret)
+ return ret;
+
+ return cxgb4_matchall_alloc_tc(dev, cls_matchall);
+}
+
+int cxgb4_tc_matchall_destroy(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ if (ingress) {
+ if (cls_matchall->cookie !=
+ tc_port_matchall->ingress.fs.tc_cookie)
+ return -ENOENT;
+
+ return cxgb4_matchall_free_filter(dev);
+ }
+
+ if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
+ return -ENOENT;
+
+ cxgb4_matchall_free_tc(dev);
+ return 0;
+}
+
+int cxgb4_tc_matchall_stats(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ u64 packets, bytes;
+ int ret;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
+ return -ENOENT;
+
+ ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid,
+ &packets, &bytes,
+ tc_port_matchall->ingress.fs.hash);
+ if (ret)
+ return ret;
+
+ if (tc_port_matchall->ingress.packets != packets) {
+ flow_stats_update(&cls_matchall->stats,
+ bytes - tc_port_matchall->ingress.bytes,
+ packets - tc_port_matchall->ingress.packets,
+ tc_port_matchall->ingress.last_used);
+
+ tc_port_matchall->ingress.packets = packets;
+ tc_port_matchall->ingress.bytes = bytes;
+ tc_port_matchall->ingress.last_used = jiffies;
+ }
+
+ return 0;
+}
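cxgb4_tc_matchall_stats() above reports only the delta since the last poll, because the hardware counters are cumulative, and then refreshes its cached totals. A small userspace sketch of that delta pattern, with flow_stats_update() replaced by a printf:

#include <stdint.h>
#include <stdio.h>

struct cached {
	uint64_t packets;
	uint64_t bytes;
};

static void report_stats(struct cached *c, uint64_t hw_pkts, uint64_t hw_bytes)
{
	if (c->packets == hw_pkts)
		return;		/* nothing new to report */

	printf("delta: %llu pkts, %llu bytes\n",
	       (unsigned long long)(hw_pkts - c->packets),
	       (unsigned long long)(hw_bytes - c->bytes));
	c->packets = hw_pkts;	/* refresh the cached totals */
	c->bytes = hw_bytes;
}

int main(void)
{
	struct cached c = { 0, 0 };

	report_stats(&c, 100, 64000);	/* delta: 100 pkts, 64000 bytes */
	report_stats(&c, 150, 96000);	/* delta: 50 pkts, 32000 bytes */
	return 0;
}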
+
+static void cxgb4_matchall_disable_offload(struct net_device *dev)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
+ cxgb4_matchall_free_tc(dev);
+
+ if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
+ cxgb4_matchall_free_filter(dev);
+}
+
+int cxgb4_init_tc_matchall(struct adapter *adap)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct cxgb4_tc_matchall *tc_matchall;
+ int ret;
+
+ tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
+ if (!tc_matchall)
+ return -ENOMEM;
+
+ tc_port_matchall = kcalloc(adap->params.nports,
+ sizeof(*tc_port_matchall),
+ GFP_KERNEL);
+ if (!tc_port_matchall) {
+ ret = -ENOMEM;
+ goto out_free_matchall;
+ }
+
+ tc_matchall->port_matchall = tc_port_matchall;
+ adap->tc_matchall = tc_matchall;
+ return 0;
+
+out_free_matchall:
+ kfree(tc_matchall);
+ return ret;
+}
+
+void cxgb4_cleanup_tc_matchall(struct adapter *adap)
+{
+ u8 i;
+
+ if (adap->tc_matchall) {
+ if (adap->tc_matchall->port_matchall) {
+ for (i = 0; i < adap->params.nports; i++) {
+ struct net_device *dev = adap->port[i];
+
+ if (dev)
+ cxgb4_matchall_disable_offload(dev);
+ }
+ kfree(adap->tc_matchall->port_matchall);
+ }
+ kfree(adap->tc_matchall);
+ }
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h
new file mode 100644
index 000000000000..ab6b5683dfd3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
+
+#ifndef __CXGB4_TC_MATCHALL_H__
+#define __CXGB4_TC_MATCHALL_H__
+
+#include <net/pkt_cls.h>
+
+enum cxgb4_matchall_state {
+ CXGB4_MATCHALL_STATE_DISABLED = 0,
+ CXGB4_MATCHALL_STATE_ENABLED,
+};
+
+struct cxgb4_matchall_egress_entry {
+ enum cxgb4_matchall_state state; /* Current MATCHALL offload state */
+ u8 hwtc; /* Traffic class bound to port */
+ u64 cookie; /* Used to identify the MATCHALL rule offloaded */
+};
+
+struct cxgb4_matchall_ingress_entry {
+ enum cxgb4_matchall_state state; /* Current MATCHALL offload state */
+ u32 tid; /* Index to hardware filter entry */
+ struct ch_filter_specification fs; /* Filter entry */
+ u64 bytes; /* # of bytes hitting the filter */
+ u64 packets; /* # of packets hitting the filter */
+ u64 last_used; /* Last updated jiffies time */
+};
+
+struct cxgb4_tc_port_matchall {
+ struct cxgb4_matchall_egress_entry egress; /* Egress offload info */
+ struct cxgb4_matchall_ingress_entry ingress; /* Ingress offload info */
+};
+
+struct cxgb4_tc_matchall {
+ struct cxgb4_tc_port_matchall *port_matchall; /* Per port entry */
+};
+
+int cxgb4_tc_matchall_replace(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress);
+int cxgb4_tc_matchall_destroy(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress);
+int cxgb4_tc_matchall_stats(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall);
+
+int cxgb4_init_tc_matchall(struct adapter *adap);
+void cxgb4_cleanup_tc_matchall(struct adapter *adap);
+#endif /* __CXGB4_TC_MATCHALL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
new file mode 100644
index 000000000000..db55673b77bd
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
+
+#include "cxgb4.h"
+#include "cxgb4_tc_mqprio.h"
+#include "sched.h"
+
+static int cxgb4_mqprio_validate(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ u64 min_rate = 0, max_rate = 0, max_link_rate;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ u32 speed, qcount = 0, qoffset = 0;
+ int ret;
+ u8 i;
+
+ if (!mqprio->qopt.num_tc)
+ return 0;
+
+ if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) {
+ netdev_err(dev, "Only full TC hardware offload is supported\n");
+ return -EINVAL;
+ } else if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL) {
+ netdev_err(dev, "Only channel mode offload is supported\n");
+ return -EINVAL;
+ } else if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
+ netdev_err(dev, "Only bandwidth rate shaper supported\n");
+ return -EINVAL;
+ } else if (mqprio->qopt.num_tc > adap->params.nsched_cls) {
+ netdev_err(dev,
+ "Only %u traffic classes supported by hardware\n",
+ adap->params.nsched_cls);
+ return -ERANGE;
+ }
+
+ ret = t4_get_link_params(pi, NULL, &speed, NULL);
+ if (ret) {
+ netdev_err(dev, "Failed to get link speed, ret: %d\n", ret);
+ return -EINVAL;
+ }
+
+ /* Convert from Mbps to bps */
+ max_link_rate = (u64)speed * 1000 * 1000;
+
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
+ qcount += mqprio->qopt.count[i];
+
+ /* Convert bytes per second to bits per second */
+ min_rate += (mqprio->min_rate[i] * 8);
+ max_rate += (mqprio->max_rate[i] * 8);
+ }
+
+ if (qoffset >= adap->tids.neotids || qcount > adap->tids.neotids)
+ return -ENOMEM;
+
+ if (min_rate > max_link_rate || max_rate > max_link_rate) {
+ netdev_err(dev,
+ "Total Min/Max (%llu/%llu) Rate > supported (%llu)\n",
+ min_rate, max_rate, max_link_rate);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cxgb4_init_eosw_txq(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq,
+ u32 eotid, u32 hwqid)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_eosw_desc *ring;
+
+ memset(eosw_txq, 0, sizeof(*eosw_txq));
+
+ ring = kcalloc(CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM,
+ sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return -ENOMEM;
+
+ eosw_txq->desc = ring;
+ eosw_txq->ndesc = CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM;
+ spin_lock_init(&eosw_txq->lock);
+ eosw_txq->state = CXGB4_EO_STATE_CLOSED;
+ eosw_txq->eotid = eotid;
+ eosw_txq->hwtid = adap->tids.eotid_base + eosw_txq->eotid;
+ eosw_txq->cred = adap->params.ofldq_wr_cred;
+ eosw_txq->hwqid = hwqid;
+ eosw_txq->netdev = dev;
+ tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart,
+ (unsigned long)eosw_txq);
+ return 0;
+}
+
+static void cxgb4_clean_eosw_txq(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ cxgb4_eosw_txq_free_desc(adap, eosw_txq, eosw_txq->ndesc);
+ eosw_txq->pidx = 0;
+ eosw_txq->last_pidx = 0;
+ eosw_txq->cidx = 0;
+ eosw_txq->last_cidx = 0;
+ eosw_txq->flowc_idx = 0;
+ eosw_txq->inuse = 0;
+ eosw_txq->cred = adap->params.ofldq_wr_cred;
+ eosw_txq->ncompl = 0;
+ eosw_txq->last_compl = 0;
+ eosw_txq->state = CXGB4_EO_STATE_CLOSED;
+}
+
+static void cxgb4_free_eosw_txq(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq)
+{
+ spin_lock_bh(&eosw_txq->lock);
+ cxgb4_clean_eosw_txq(dev, eosw_txq);
+ kfree(eosw_txq->desc);
+ spin_unlock_bh(&eosw_txq->lock);
+ tasklet_kill(&eosw_txq->qresume_tsk);
+}
+
+static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_ofld_rxq *eorxq;
+ struct sge_eohw_txq *eotxq;
+ int ret, msix = 0;
+ u32 i;
+
+ /* Allocate ETHOFLD hardware queue structures if not done already */
+ if (!refcount_read(&adap->tc_mqprio->refcnt)) {
+ adap->sge.eohw_rxq = kcalloc(adap->sge.eoqsets,
+ sizeof(struct sge_ofld_rxq),
+ GFP_KERNEL);
+ if (!adap->sge.eohw_rxq)
+ return -ENOMEM;
+
+ adap->sge.eohw_txq = kcalloc(adap->sge.eoqsets,
+ sizeof(struct sge_eohw_txq),
+ GFP_KERNEL);
+ if (!adap->sge.eohw_txq) {
+ kfree(adap->sge.eohw_rxq);
+ return -ENOMEM;
+ }
+ }
+
+ if (!(adap->flags & CXGB4_USING_MSIX))
+ msix = -((int)adap->sge.intrq.abs_id + 1);
+
+ for (i = 0; i < pi->nqsets; i++) {
+ eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
+ eotxq = &adap->sge.eohw_txq[pi->first_qset + i];
+
+ /* Allocate Rxqs for receiving ETHOFLD Tx completions */
+ if (msix >= 0) {
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msix < 0) {
+ ret = msix;
+ goto out_free_queues;
+ }
+
+ eorxq->msix = &adap->msix_info[msix];
+ snprintf(eorxq->msix->desc,
+ sizeof(eorxq->msix->desc),
+ "%s-eorxq%d", dev->name, i);
+ }
+
+ init_rspq(adap, &eorxq->rspq,
+ CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC,
+ CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT,
+ CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM,
+ CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE);
+
+ eorxq->fl.size = CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM;
+
+ ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false,
+ dev, msix, &eorxq->fl,
+ cxgb4_ethofld_rx_handler,
+ NULL, 0);
+ if (ret)
+ goto out_free_queues;
+
+ /* Allocate ETHOFLD hardware Txqs */
+ eotxq->q.size = CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM;
+ ret = t4_sge_alloc_ethofld_txq(adap, eotxq, dev,
+ eorxq->rspq.cntxt_id);
+ if (ret)
+ goto out_free_queues;
+
+ /* Allocate IRQs, set IRQ affinity, and start Rx */
+ if (adap->flags & CXGB4_USING_MSIX) {
+ ret = request_irq(eorxq->msix->vec, t4_sge_intr_msix, 0,
+ eorxq->msix->desc, &eorxq->rspq);
+ if (ret)
+ goto out_free_msix;
+
+ cxgb4_set_msix_aff(adap, eorxq->msix->vec,
+ &eorxq->msix->aff_mask, i);
+ }
+
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ cxgb4_enable_rx(adap, &eorxq->rspq);
+ }
+
+ refcount_inc(&adap->tc_mqprio->refcnt);
+ return 0;
+
+out_free_msix:
+ while (i-- > 0) {
+ eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
+
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ cxgb4_quiesce_rx(&eorxq->rspq);
+
+ if (adap->flags & CXGB4_USING_MSIX) {
+ cxgb4_clear_msix_aff(eorxq->msix->vec,
+ eorxq->msix->aff_mask);
+ free_irq(eorxq->msix->vec, &eorxq->rspq);
+ }
+ }
+
+out_free_queues:
+ for (i = 0; i < pi->nqsets; i++) {
+ eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
+ eotxq = &adap->sge.eohw_txq[pi->first_qset + i];
+
+ if (eorxq->rspq.desc)
+ free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
+ if (eorxq->msix)
+ cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
+ t4_sge_free_ethofld_txq(adap, eotxq);
+ }
+
+ kfree(adap->sge.eohw_txq);
+ kfree(adap->sge.eohw_rxq);
+
+ return ret;
+}
+
+static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_ofld_rxq *eorxq;
+ struct sge_eohw_txq *eotxq;
+ u32 i;
+
+ /* Return if no ETHOFLD structures have been allocated yet */
+ if (!refcount_read(&adap->tc_mqprio->refcnt))
+ return;
+
+ /* Return if no hardware queues have been allocated */
+ if (!adap->sge.eohw_rxq[pi->first_qset].rspq.desc)
+ return;
+
+ for (i = 0; i < pi->nqsets; i++) {
+ eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
+ eotxq = &adap->sge.eohw_txq[pi->first_qset + i];
+
+ /* The device removal path will already have disabled
+ * NAPI before unregistering the netdevice, so only
+ * disable NAPI here if we're not in the removal path.
+ */
+ if (!(adap->flags & CXGB4_SHUTTING_DOWN))
+ cxgb4_quiesce_rx(&eorxq->rspq);
+
+ if (adap->flags & CXGB4_USING_MSIX) {
+ cxgb4_clear_msix_aff(eorxq->msix->vec,
+ eorxq->msix->aff_mask);
+ free_irq(eorxq->msix->vec, &eorxq->rspq);
+ }
+
+ free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
+ t4_sge_free_ethofld_txq(adap, eotxq);
+ }
+
+ /* Free up ETHOFLD structures if there are no users */
+ if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
+ kfree(adap->sge.eohw_txq);
+ kfree(adap->sge.eohw_rxq);
+ }
+}
+
+static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct ch_sched_params p = {
+ .type = SCHED_CLASS_TYPE_PACKET,
+ .u.params.level = SCHED_CLASS_LEVEL_CL_RL,
+ .u.params.mode = SCHED_CLASS_MODE_FLOW,
+ .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
+ .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
+ .u.params.class = SCHED_CLS_NONE,
+ .u.params.weight = 0,
+ .u.params.pktsize = dev->mtu,
+ };
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sched_class *e;
+ int ret;
+ u8 i;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ p.u.params.channel = pi->tx_chan;
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ /* Convert from bytes per second to Kbps */
+ p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
+ p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);
+
+ e = cxgb4_sched_class_alloc(dev, &p);
+ if (!e) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ tc_port_mqprio->tc_hwtc_map[i] = e->idx;
+ }
+
+ return 0;
+
+out_err:
+ while (i--)
+ cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
+
+ return ret;
+}
+
+static void cxgb4_mqprio_free_tc(struct net_device *dev)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ u8 i;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++)
+ cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
+}
+
+static int cxgb4_mqprio_class_bind(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq,
+ u8 tc)
+{
+ struct ch_sched_flowc fe;
+ int ret;
+
+ init_completion(&eosw_txq->completion);
+
+ fe.tid = eosw_txq->eotid;
+ fe.class = tc;
+
+ ret = cxgb4_sched_class_bind(dev, &fe, SCHED_FLOWC);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&eosw_txq->completion,
+ CXGB4_FLOWC_WAIT_TIMEOUT);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void cxgb4_mqprio_class_unbind(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq,
+ u8 tc)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct ch_sched_flowc fe;
+
+ /* If we're shutting down, interrupts are disabled and no completions
+ * come back. So, skip waiting for completions in this scenario.
+ */
+ if (!(adap->flags & CXGB4_SHUTTING_DOWN))
+ init_completion(&eosw_txq->completion);
+
+ fe.tid = eosw_txq->eotid;
+ fe.class = tc;
+ cxgb4_sched_class_unbind(dev, &fe, SCHED_FLOWC);
+
+ if (!(adap->flags & CXGB4_SHUTTING_DOWN))
+ wait_for_completion_timeout(&eosw_txq->completion,
+ CXGB4_FLOWC_WAIT_TIMEOUT);
+}
+
+static int cxgb4_mqprio_enable_offload(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ u32 qoffset, qcount, tot_qcount, qid, hwqid;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_eosw_txq *eosw_txq;
+ int eotid, ret;
+ u16 i, j;
+ u8 hwtc;
+
+ ret = cxgb4_mqprio_alloc_hw_resources(dev);
+ if (ret)
+ return -ENOMEM;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ qoffset = mqprio->qopt.offset[i];
+ qcount = mqprio->qopt.count[i];
+ for (j = 0; j < qcount; j++) {
+ eotid = cxgb4_get_free_eotid(&adap->tids);
+ if (eotid < 0) {
+ ret = -ENOMEM;
+ goto out_free_eotids;
+ }
+
+ qid = qoffset + j;
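+ /* Spread EOSW Tx queues round-robin across the port's
+ * hardware queue sets; e.g. with nqsets = 8, eotid 10
+ * lands on hardware queue first_qset + 2.
+ */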
+ hwqid = pi->first_qset + (eotid % pi->nqsets);
+ eosw_txq = &tc_port_mqprio->eosw_txq[qid];
+ ret = cxgb4_init_eosw_txq(dev, eosw_txq,
+ eotid, hwqid);
+ if (ret)
+ goto out_free_eotids;
+
+ cxgb4_alloc_eotid(&adap->tids, eotid, eosw_txq);
+
+ hwtc = tc_port_mqprio->tc_hwtc_map[i];
+ ret = cxgb4_mqprio_class_bind(dev, eosw_txq, hwtc);
+ if (ret)
+ goto out_free_eotids;
+ }
+ }
+
+ memcpy(&tc_port_mqprio->mqprio, mqprio,
+ sizeof(struct tc_mqprio_qopt_offload));
+
+ /* Inform the stack about the configured tc params.
+ *
+ * Set the correct queue map. If no queue count has been
+ * specified, then send the traffic through the default NIC
+ * queues instead of the ETHOFLD queues.
+ */
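+ /* Illustration: with nqsets = 8 and two TCs of 4 queues each,
+ * TC0 maps to netdev queues 8-11 and TC1 to queues 12-15;
+ * tot_qcount below then becomes 8 + 4 + 4 = 16.
+ */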
+ ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc);
+ if (ret)
+ goto out_free_eotids;
+
+ tot_qcount = pi->nqsets;
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ qcount = mqprio->qopt.count[i];
+ if (qcount) {
+ qoffset = mqprio->qopt.offset[i] + pi->nqsets;
+ } else {
+ qcount = pi->nqsets;
+ qoffset = 0;
+ }
+
+ ret = netdev_set_tc_queue(dev, i, qcount, qoffset);
+ if (ret)
+ goto out_reset_tc;
+
+ tot_qcount += mqprio->qopt.count[i];
+ }
+
+ ret = netif_set_real_num_tx_queues(dev, tot_qcount);
+ if (ret)
+ goto out_reset_tc;
+
+ tc_port_mqprio->state = CXGB4_MQPRIO_STATE_ACTIVE;
+ return 0;
+
+out_reset_tc:
+ netdev_reset_tc(dev);
+ i = mqprio->qopt.num_tc;
+
+out_free_eotids:
+ while (i-- > 0) {
+ qoffset = mqprio->qopt.offset[i];
+ qcount = mqprio->qopt.count[i];
+ for (j = 0; j < qcount; j++) {
+ eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];
+
+ hwtc = tc_port_mqprio->tc_hwtc_map[i];
+ cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);
+
+ cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
+ cxgb4_free_eosw_txq(dev, eosw_txq);
+ }
+ }
+
+ cxgb4_mqprio_free_hw_resources(dev);
+ return ret;
+}
+
+static void cxgb4_mqprio_disable_offload(struct net_device *dev)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_eosw_txq *eosw_txq;
+ u32 qoffset, qcount;
+ u16 i, j;
+ u8 hwtc;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ if (tc_port_mqprio->state != CXGB4_MQPRIO_STATE_ACTIVE)
+ return;
+
+ netdev_reset_tc(dev);
+ netif_set_real_num_tx_queues(dev, pi->nqsets);
+
+ for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++) {
+ qoffset = tc_port_mqprio->mqprio.qopt.offset[i];
+ qcount = tc_port_mqprio->mqprio.qopt.count[i];
+ for (j = 0; j < qcount; j++) {
+ eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];
+
+ hwtc = tc_port_mqprio->tc_hwtc_map[i];
+ cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);
+
+ cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
+ cxgb4_free_eosw_txq(dev, eosw_txq);
+ }
+ }
+
+ cxgb4_mqprio_free_hw_resources(dev);
+
+ /* Free up the traffic classes */
+ cxgb4_mqprio_free_tc(dev);
+
+ memset(&tc_port_mqprio->mqprio, 0,
+ sizeof(struct tc_mqprio_qopt_offload));
+
+ tc_port_mqprio->state = CXGB4_MQPRIO_STATE_DISABLED;
+}
+
+int cxgb4_setup_tc_mqprio(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ bool needs_bring_up = false;
+ int ret;
+
+ ret = cxgb4_mqprio_validate(dev, mqprio);
+ if (ret)
+ return ret;
+
+ /* To configure tc params, the currently allocated EOTIDs must
+ * be freed. However, they can't be freed while traffic is
+ * running on the interface. So, ensure the interface is down
+ * before configuring tc params.
+ */
+ if (netif_running(dev)) {
+ cxgb_close(dev);
+ needs_bring_up = true;
+ }
+
+ cxgb4_mqprio_disable_offload(dev);
+
+ /* If this is a request to clear the configuration, just
+ * return, since the resources have already been freed above.
+ */
+ if (!mqprio->qopt.num_tc)
+ goto out;
+
+ /* Allocate free available traffic classes and configure
+ * their rate parameters.
+ */
+ ret = cxgb4_mqprio_alloc_tc(dev, mqprio);
+ if (ret)
+ goto out;
+
+ ret = cxgb4_mqprio_enable_offload(dev, mqprio);
+ if (ret) {
+ cxgb4_mqprio_free_tc(dev);
+ goto out;
+ }
+
+out:
+ if (needs_bring_up)
+ cxgb_open(dev);
+
+ return ret;
+}
+
+int cxgb4_init_tc_mqprio(struct adapter *adap)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
+ struct cxgb4_tc_mqprio *tc_mqprio;
+ struct sge_eosw_txq *eosw_txq;
+ int ret = 0;
+ u8 i;
+
+ tc_mqprio = kzalloc(sizeof(*tc_mqprio), GFP_KERNEL);
+ if (!tc_mqprio)
+ return -ENOMEM;
+
+ tc_port_mqprio = kcalloc(adap->params.nports, sizeof(*tc_port_mqprio),
+ GFP_KERNEL);
+ if (!tc_port_mqprio) {
+ ret = -ENOMEM;
+ goto out_free_mqprio;
+ }
+
+ tc_mqprio->port_mqprio = tc_port_mqprio;
+ for (i = 0; i < adap->params.nports; i++) {
+ port_mqprio = &tc_mqprio->port_mqprio[i];
+ eosw_txq = kcalloc(adap->tids.neotids, sizeof(*eosw_txq),
+ GFP_KERNEL);
+ if (!eosw_txq) {
+ ret = -ENOMEM;
+ goto out_free_ports;
+ }
+ port_mqprio->eosw_txq = eosw_txq;
+ }
+
+ adap->tc_mqprio = tc_mqprio;
+ refcount_set(&adap->tc_mqprio->refcnt, 0);
+ return 0;
+
+out_free_ports:
+ for (i = 0; i < adap->params.nports; i++) {
+ port_mqprio = &tc_mqprio->port_mqprio[i];
+ kfree(port_mqprio->eosw_txq);
+ }
+ kfree(tc_port_mqprio);
+
+out_free_mqprio:
+ kfree(tc_mqprio);
+ return ret;
+}
+
+void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
+{
+ struct cxgb4_tc_port_mqprio *port_mqprio;
+ u8 i;
+
+ if (adap->tc_mqprio) {
+ if (adap->tc_mqprio->port_mqprio) {
+ for (i = 0; i < adap->params.nports; i++) {
+ struct net_device *dev = adap->port[i];
+
+ if (dev)
+ cxgb4_mqprio_disable_offload(dev);
+ port_mqprio = &adap->tc_mqprio->port_mqprio[i];
+ kfree(port_mqprio->eosw_txq);
+ }
+ kfree(adap->tc_mqprio->port_mqprio);
+ }
+ kfree(adap->tc_mqprio);
+ }
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
new file mode 100644
index 000000000000..c532f1ef8451
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
+
+#ifndef __CXGB4_TC_MQPRIO_H__
+#define __CXGB4_TC_MQPRIO_H__
+
+#include <net/pkt_cls.h>
+
+#define CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM 128
+
+#define CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM 1024
+
+#define CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM 1024
+#define CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE 64
+#define CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC 5
+#define CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT 8
+
+#define CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM 72
+
+#define CXGB4_FLOWC_WAIT_TIMEOUT (5 * HZ)
+
+enum cxgb4_mqprio_state {
+ CXGB4_MQPRIO_STATE_DISABLED = 0,
+ CXGB4_MQPRIO_STATE_ACTIVE,
+};
+
+struct cxgb4_tc_port_mqprio {
+ enum cxgb4_mqprio_state state; /* Current MQPRIO offload state */
+ struct tc_mqprio_qopt_offload mqprio; /* MQPRIO offload params */
+ struct sge_eosw_txq *eosw_txq; /* Netdev SW Tx queue array */
+ u8 tc_hwtc_map[TC_QOPT_MAX_QUEUE]; /* MQPRIO tc to hardware tc map */
+};
+
+struct cxgb4_tc_mqprio {
+ refcount_t refcnt; /* Refcount for adapter-wide resources */
+ struct cxgb4_tc_port_mqprio *port_mqprio; /* Per port MQPRIO info */
+};
+
+int cxgb4_setup_tc_mqprio(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio);
+int cxgb4_init_tc_mqprio(struct adapter *adap);
+void cxgb4_cleanup_tc_mqprio(struct adapter *adap);
+#endif /* __CXGB4_TC_MQPRIO_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 02fc63fa7f25..133f8623ba86 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -36,6 +36,7 @@
#include <net/tc_act/tc_mirred.h>
#include "cxgb4.h"
+#include "cxgb4_filter.h"
#include "cxgb4_tc_u32_parse.h"
#include "cxgb4_tc_u32.h"
@@ -148,6 +149,7 @@ static int fill_action_fields(struct adapter *adap,
int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{
const struct cxgb4_match_field *start, *link_start = NULL;
+ struct netlink_ext_ack *extack = cls->common.extack;
struct adapter *adapter = netdev2adap(dev);
__be16 protocol = cls->common.protocol;
struct ch_filter_specification fs;
@@ -164,14 +166,21 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
return -EOPNOTSUPP;
- /* Fetch the location to insert the filter. */
- filter_id = cls->knode.handle & 0xFFFFF;
+ /* Note that TC uses prio 0 to indicate that the stack should
+ * generate an automatic prio, and hence never passes prio 0 to
+ * the driver. However, the hardware TCAM index starts from 0;
+ * hence the -1 here.
+ */
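+ /* e.g. a rule with u32 node id 1 occupies LETCAM index 0. */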
+ filter_id = TC_U32_NODE(cls->knode.handle) - 1;
- if (filter_id > adapter->tids.nftids) {
- dev_err(adapter->pdev_dev,
- "Location %d out of range for insertion. Max: %d\n",
- filter_id, adapter->tids.nftids);
- return -ERANGE;
+ /* Only insert the U32 rule if its priority doesn't conflict
+ * with existing rules in the LETCAM.
+ */
+ if (filter_id >= adapter->tids.nftids ||
+ !cxgb4_filter_prio_in_range(dev, filter_id, cls->common.prio)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "No free LETCAM index available");
+ return -ENOMEM;
}
t = adapter->tc_u32;
@@ -190,6 +199,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
memset(&fs, 0, sizeof(fs));
+ fs.tc_prio = cls->common.prio;
+ fs.tc_cookie = cls->knode.handle;
+
if (protocol == htons(ETH_P_IPV6)) {
start = cxgb4_ipv6_fields;
is_ipv6 = true;
@@ -350,14 +362,10 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
return -EOPNOTSUPP;
/* Fetch the location to delete the filter. */
- filter_id = cls->knode.handle & 0xFFFFF;
-
- if (filter_id > adapter->tids.nftids) {
- dev_err(adapter->pdev_dev,
- "Location %d out of range for deletion. Max: %d\n",
- filter_id, adapter->tids.nftids);
+ filter_id = TC_U32_NODE(cls->knode.handle) - 1;
+ if (filter_id >= adapter->tids.nftids ||
+ cls->knode.handle != adapter->tids.ftid_tab[filter_id].fs.tc_cookie)
return -ERANGE;
- }
t = adapter->tc_u32;
handle = cls->knode.handle;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 86b528d8364c..cce33d279094 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -53,35 +53,6 @@
#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
-static int get_msix_idx_from_bmap(struct adapter *adap)
-{
- struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
- unsigned long flags;
- unsigned int msix_idx;
-
- spin_lock_irqsave(&bmap->lock, flags);
- msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
- if (msix_idx < bmap->mapsize) {
- __set_bit(msix_idx, bmap->msix_bmap);
- } else {
- spin_unlock_irqrestore(&bmap->lock, flags);
- return -ENOSPC;
- }
-
- spin_unlock_irqrestore(&bmap->lock, flags);
- return msix_idx;
-}
-
-static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
-{
- struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
- unsigned long flags;
-
- spin_lock_irqsave(&bmap->lock, flags);
- __clear_bit(msix_idx, bmap->msix_bmap);
- spin_unlock_irqrestore(&bmap->lock, flags);
-}
-
/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
@@ -138,9 +109,9 @@ static int alloc_uld_rxqs(struct adapter *adap,
struct sge_uld_rxq_info *rxq_info, bool lro)
{
unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
- int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
struct sge_ofld_rxq *q = rxq_info->uldrxq;
unsigned short *ids = rxq_info->rspq_id;
+ int i, err, msi_idx, que_idx = 0;
struct sge *s = &adap->sge;
unsigned int per_chan;
@@ -159,12 +130,18 @@ static int alloc_uld_rxqs(struct adapter *adap,
}
if (msi_idx >= 0) {
- bmap_idx = get_msix_idx_from_bmap(adap);
- if (bmap_idx < 0) {
+ msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msi_idx < 0) {
err = -ENOSPC;
goto freeout;
}
- msi_idx = adap->msix_info_ulds[bmap_idx].idx;
+
+ snprintf(adap->msix_info[msi_idx].desc,
+ sizeof(adap->msix_info[msi_idx].desc),
+ "%s-%s%d",
+ adap->port[0]->name, rxq_info->name, i);
+
+ q->msix = &adap->msix_info[msi_idx];
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
adap->port[que_idx++ / per_chan],
@@ -175,8 +152,7 @@ static int alloc_uld_rxqs(struct adapter *adap,
0);
if (err)
goto freeout;
- if (msi_idx >= 0)
- rxq_info->msix_tbl[i] = bmap_idx;
+
memset(&q->stats, 0, sizeof(q->stats));
if (ids)
ids[i] = q->rspq.abs_id;
@@ -188,6 +164,8 @@ freeout:
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
+ if (q->msix)
+ cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
}
return err;
}
@@ -198,14 +176,6 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int i, ret = 0;
- if (adap->flags & CXGB4_USING_MSIX) {
- rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
- sizeof(unsigned short),
- GFP_KERNEL);
- if (!rxq_info->msix_tbl)
- return -ENOMEM;
- }
-
ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */
@@ -261,8 +231,6 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
t4_free_uld_rxqs(adap, rxq_info->nciq,
rxq_info->uldrxq + rxq_info->nrxq);
t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
- if (adap->flags & CXGB4_USING_MSIX)
- kfree(rxq_info->msix_tbl);
}
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
@@ -355,13 +323,12 @@ static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- struct uld_msix_info *minfo;
+ struct msix_info *minfo;
+ unsigned int idx;
int err = 0;
- unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
- bmap_idx = rxq_info->msix_tbl[idx];
- minfo = &adap->msix_info_ulds[bmap_idx];
+ minfo = rxq_info->uldrxq[idx].msix;
err = request_irq(minfo->vec,
t4_sge_intr_msix, 0,
minfo->desc,
@@ -376,10 +343,9 @@ request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
unwind:
while (idx-- > 0) {
- bmap_idx = rxq_info->msix_tbl[idx];
- minfo = &adap->msix_info_ulds[bmap_idx];
+ minfo = rxq_info->uldrxq[idx].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
- free_msix_idx_in_bmap(adap, bmap_idx);
+ cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
}
return err;
@@ -389,69 +355,45 @@ static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- struct uld_msix_info *minfo;
- unsigned int idx, bmap_idx;
+ struct msix_info *minfo;
+ unsigned int idx;
for_each_uldrxq(rxq_info, idx) {
- bmap_idx = rxq_info->msix_tbl[idx];
- minfo = &adap->msix_info_ulds[bmap_idx];
-
+ minfo = rxq_info->uldrxq[idx].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
- free_msix_idx_in_bmap(adap, bmap_idx);
+ cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
}
}
-static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
+static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- int n = sizeof(adap->msix_info_ulds[0].desc);
- unsigned int idx, bmap_idx;
+ int idx;
for_each_uldrxq(rxq_info, idx) {
- bmap_idx = rxq_info->msix_tbl[idx];
-
- snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
- adap->port[0]->name, rxq_info->name, idx);
- }
-}
-
-static void enable_rx(struct adapter *adap, struct sge_rspq *q)
-{
- if (!q)
- return;
+ struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
- if (q->handler)
- napi_enable(&q->napi);
-
- /* 0-increment GTS to start the timer and enable interrupts */
- t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
- SEINTARM_V(q->intr_params) |
- INGRESSQID_V(q->cntxt_id));
-}
+ if (!q)
+ continue;
-static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
-{
- if (q && q->handler)
- napi_disable(&q->napi);
+ cxgb4_enable_rx(adap, q);
+ }
}
-static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
+static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int idx;
- for_each_uldrxq(rxq_info, idx)
- enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
-}
+ for_each_uldrxq(rxq_info, idx) {
+ struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
-static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
-{
- struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- int idx;
+ if (!q)
+ continue;
- for_each_uldrxq(rxq_info, idx)
- quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
+ cxgb4_quiesce_rx(q);
+ }
}
static void
@@ -750,7 +692,6 @@ void cxgb4_register_uld(enum cxgb4_uld type,
if (ret)
goto free_queues;
if (adap->flags & CXGB4_USING_MSIX) {
- name_msix_vecs_uld(adap, type);
ret = request_msix_queue_irqs_uld(adap, type);
if (ret)
goto free_rxq;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index cee582e36134..861b25d28ed6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -89,6 +89,10 @@ union aopen_entry {
union aopen_entry *next;
};
+struct eotid_entry {
+ void *data;
+};
+
/*
* Holds the size, base address, free list start, etc of the TID, server TID,
* and active-open TID tables. The tables themselves are allocated dynamically.
@@ -126,6 +130,12 @@ struct tid_info {
unsigned int v6_stids_in_use;
unsigned int sftids_in_use;
+ /* ETHOFLD range */
+ struct eotid_entry *eotid_tab;
+ unsigned long *eotid_bmap;
+ unsigned int eotid_base;
+ unsigned int neotids;
+
/* TIDs in the TCAM */
atomic_t tids_in_use;
/* TIDs in the HASH */
@@ -176,6 +186,35 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
atomic_inc(&t->conns_in_use);
}
+static inline struct eotid_entry *cxgb4_lookup_eotid(struct tid_info *t,
+ u32 eotid)
+{
+ return eotid < t->neotids ? &t->eotid_tab[eotid] : NULL;
+}
+
+static inline int cxgb4_get_free_eotid(struct tid_info *t)
+{
+ int eotid;
+
+ eotid = find_first_zero_bit(t->eotid_bmap, t->neotids);
+ if (eotid >= t->neotids)
+ eotid = -1;
+
+ return eotid;
+}
+
+static inline void cxgb4_alloc_eotid(struct tid_info *t, u32 eotid, void *data)
+{
+ set_bit(eotid, t->eotid_bmap);
+ t->eotid_tab[eotid].data = data;
+}
+
+static inline void cxgb4_free_eotid(struct tid_info *t, u32 eotid)
+{
+ clear_bit(eotid, t->eotid_bmap);
+ t->eotid_tab[eotid].data = NULL;
+}
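+
+/* Typical ETHOFLD TID lifecycle (a sketch; see cxgb4_tc_mqprio.c
+ * and sge.c for the real call sites):
+ *
+ *	eotid = cxgb4_get_free_eotid(t);	/- find a free TID, or -1
+ *	cxgb4_alloc_eotid(t, eotid, txq);	/- mark used, attach data
+ *	...
+ *	entry = cxgb4_lookup_eotid(t, eotid);	/- retrieve attached data
+ *	cxgb4_free_eotid(t, eotid);		/- release the TID
+ */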
+
int cxgb4_alloc_atid(struct tid_info *t, void *data);
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 60218dc676a8..3e61bd5d0c29 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -50,6 +50,7 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
e = &s->tab[p->u.params.class];
switch (op) {
case SCHED_FW_OP_ADD:
+ case SCHED_FW_OP_DEL:
err = t4_sched_params(adap, p->type,
p->u.params.level, p->u.params.mode,
p->u.params.rateunit,
@@ -92,45 +93,69 @@ static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
pf = adap->pf;
vf = 0;
+
+ err = t4_set_params(adap, adap->mbox, pf, vf, 1,
+ &fw_param, &fw_class);
+ break;
+ }
+ case SCHED_FLOWC: {
+ struct sched_flowc_entry *fe;
+
+ fe = (struct sched_flowc_entry *)arg;
+
+ fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
+ err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
+ fe->param.tid, fw_class);
break;
}
default:
err = -ENOTSUPP;
- goto out;
+ break;
}
- err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class);
-
-out:
return err;
}
-static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
- const unsigned int qid,
- int *index)
+static void *t4_sched_entry_lookup(struct port_info *pi,
+ enum sched_bind_type type,
+ const u32 val)
{
struct sched_table *s = pi->sched_tbl;
struct sched_class *e, *end;
- struct sched_class *found = NULL;
- int i;
+ void *found = NULL;
- /* Look for a class with matching bound queue parameters */
+ /* Look for an entry with matching @val */
end = &s->tab[s->sched_size];
for (e = &s->tab[0]; e != end; ++e) {
- struct sched_queue_entry *qe;
-
- i = 0;
- if (e->state == SCHED_STATE_UNUSED)
+ if (e->state == SCHED_STATE_UNUSED ||
+ e->bind_type != type)
continue;
- list_for_each_entry(qe, &e->queue_list, list) {
- if (qe->cntxt_id == qid) {
- found = e;
- if (index)
- *index = i;
- break;
+ switch (type) {
+ case SCHED_QUEUE: {
+ struct sched_queue_entry *qe;
+
+ list_for_each_entry(qe, &e->entry_list, list) {
+ if (qe->cntxt_id == val) {
+ found = qe;
+ break;
+ }
+ }
+ break;
+ }
+ case SCHED_FLOWC: {
+ struct sched_flowc_entry *fe;
+
+ list_for_each_entry(fe, &e->entry_list, list) {
+ if (fe->param.tid == val) {
+ found = fe;
+ break;
+ }
}
- i++;
+ break;
+ }
+ default:
+ return NULL;
}
if (found)
@@ -142,52 +167,41 @@ static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
- struct adapter *adap = pi->adapter;
- struct sched_class *e;
struct sched_queue_entry *qe = NULL;
+ struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
- unsigned int qid;
- int index = -1;
+ struct sched_class *e;
int err = 0;
if (p->queue < 0 || p->queue >= pi->nqsets)
return -ERANGE;
txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
- qid = txq->q.cntxt_id;
- /* Find the existing class that the queue is bound to */
- e = t4_sched_queue_lookup(pi, qid, &index);
- if (e && index >= 0) {
- int i = 0;
-
- list_for_each_entry(qe, &e->queue_list, list) {
- if (i == index)
- break;
- i++;
- }
+ /* Find the existing entry that the queue is bound to */
+ qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
+ if (qe) {
err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
false);
if (err)
return err;
+ e = &pi->sched_tbl->tab[qe->param.class];
list_del(&qe->list);
kvfree(qe);
- if (atomic_dec_and_test(&e->refcnt)) {
- e->state = SCHED_STATE_UNUSED;
- memset(&e->info, 0, sizeof(e->info));
- }
+ if (atomic_dec_and_test(&e->refcnt))
+ cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
}
return err;
}
static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
- struct adapter *adap = pi->adapter;
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e;
struct sched_queue_entry *qe = NULL;
+ struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
+ struct sched_class *e;
unsigned int qid;
int err = 0;
@@ -215,7 +229,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
if (err)
goto out_err;
- list_add_tail(&qe->list, &e->queue_list);
+ list_add_tail(&qe->list, &e->entry_list);
+ e->bind_type = SCHED_QUEUE;
atomic_inc(&e->refcnt);
return err;
@@ -224,6 +239,71 @@ out_err:
return err;
}
+static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
+{
+ struct sched_flowc_entry *fe = NULL;
+ struct adapter *adap = pi->adapter;
+ struct sched_class *e;
+ int err = 0;
+
+ if (p->tid < 0 || p->tid >= adap->tids.neotids)
+ return -ERANGE;
+
+ /* Find the existing entry that the flowc is bound to */
+ fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
+ if (fe) {
+ err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
+ false);
+ if (err)
+ return err;
+
+ e = &pi->sched_tbl->tab[fe->param.class];
+ list_del(&fe->list);
+ kvfree(fe);
+ if (atomic_dec_and_test(&e->refcnt))
+ cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
+ }
+ return err;
+}
+
+static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
+{
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_flowc_entry *fe = NULL;
+ struct adapter *adap = pi->adapter;
+ struct sched_class *e;
+ int err = 0;
+
+ if (p->tid < 0 || p->tid >= adap->tids.neotids)
+ return -ERANGE;
+
+ fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
+ if (!fe)
+ return -ENOMEM;
+
+ /* Unbind flowc from any existing class */
+ err = t4_sched_flowc_unbind(pi, p);
+ if (err)
+ goto out_err;
+
+ /* Bind flowc to specified class */
+ memcpy(&fe->param, p, sizeof(fe->param));
+
+ e = &s->tab[fe->param.class];
+ err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
+ if (err)
+ goto out_err;
+
+ list_add_tail(&fe->list, &e->entry_list);
+ e->bind_type = SCHED_FLOWC;
+ atomic_inc(&e->refcnt);
+ return err;
+
+out_err:
+ kvfree(fe);
+ return err;
+}
+
static void t4_sched_class_unbind_all(struct port_info *pi,
struct sched_class *e,
enum sched_bind_type type)
@@ -235,10 +315,17 @@ static void t4_sched_class_unbind_all(struct port_info *pi,
case SCHED_QUEUE: {
struct sched_queue_entry *qe;
- list_for_each_entry(qe, &e->queue_list, list)
+ list_for_each_entry(qe, &e->entry_list, list)
t4_sched_queue_unbind(pi, &qe->param);
break;
}
+ case SCHED_FLOWC: {
+ struct sched_flowc_entry *fe;
+
+ list_for_each_entry(fe, &e->entry_list, list)
+ t4_sched_flowc_unbind(pi, &fe->param);
+ break;
+ }
default:
break;
}
@@ -262,6 +349,15 @@ static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
err = t4_sched_queue_unbind(pi, qe);
break;
}
+ case SCHED_FLOWC: {
+ struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+ if (bind)
+ err = t4_sched_flowc_bind(pi, fe);
+ else
+ err = t4_sched_flowc_unbind(pi, fe);
+ break;
+ }
default:
err = -ENOTSUPP;
break;
@@ -299,6 +395,12 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
class_id = qe->class;
break;
}
+ case SCHED_FLOWC: {
+ struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+ class_id = fe->class;
+ break;
+ }
default:
return -ENOTSUPP;
}
@@ -340,6 +442,12 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
class_id = qe->class;
break;
}
+ case SCHED_FLOWC: {
+ struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+ class_id = fe->class;
+ break;
+ }
default:
return -ENOTSUPP;
}
@@ -355,8 +463,8 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
const struct ch_sched_params *p)
{
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e, *end;
struct sched_class *found = NULL;
+ struct sched_class *e, *end;
if (!p) {
/* Get any available unused class */
@@ -400,7 +508,7 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
struct ch_sched_params *p)
{
- struct sched_class *e;
+ struct sched_class *e = NULL;
u8 class_id;
int err;
@@ -415,10 +523,13 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
if (class_id != SCHED_CLS_NONE)
return NULL;
- /* See if there's an exisiting class with same
- * requested sched params
+ /* See if there's an existing class with the same requested
+ * sched params. Classes can only be shared among FLOWC types.
+ * For other types, always request a new class.
+ */
- e = t4_sched_class_lookup(pi, p);
+ if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
+ e = t4_sched_class_lookup(pi, p);
+
if (!e) {
struct ch_sched_params np;
@@ -467,9 +578,57 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
return t4_sched_class_alloc(pi, p);
}
-static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
+/**
+ * cxgb4_sched_class_free - free a scheduling class
+ * @dev: net_device pointer
+ * @classid: scheduling class id
+ *
+ * Frees a scheduling class if there are no users.
+ */
+void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
- t4_sched_class_unbind_all(pi, e, SCHED_QUEUE);
+ struct port_info *pi = netdev2pinfo(dev);
+ struct sched_table *s = pi->sched_tbl;
+ struct ch_sched_params p;
+ struct sched_class *e;
+ u32 speed;
+ int ret;
+
+ e = &s->tab[classid];
+ if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) {
+ /* Port based rate limiting needs an explicit reset back
+ * to the max rate. But we'll do the explicit reset for all
+ * types, not just the port based type, to be on the safe
+ * side.
+ */
+ memcpy(&p, &e->info, sizeof(p));
+ /* Always reset mode to 0. Otherwise, FLOWC mode will
+ * still be enabled even after resetting the traffic
+ * class.
+ */
+ p.u.params.mode = 0;
+ p.u.params.minrate = 0;
+ p.u.params.pktsize = 0;
+
+ ret = t4_get_link_params(pi, NULL, &speed, NULL);
+ if (!ret)
+ p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */
+ else
+ p.u.params.maxrate = SCHED_MAX_RATE_KBPS;
+
+ t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL);
+
+ e->state = SCHED_STATE_UNUSED;
+ memset(&e->info, 0, sizeof(e->info));
+ }
+}
+
+static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+
+ t4_sched_class_unbind_all(pi, e, e->bind_type);
+ cxgb4_sched_class_free(dev, e->idx);
}
struct sched_table *t4_init_sched(unsigned int sched_size)
@@ -487,7 +646,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
memset(&s->tab[i], 0, sizeof(struct sched_class));
s->tab[i].idx = i;
s->tab[i].state = SCHED_STATE_UNUSED;
- INIT_LIST_HEAD(&s->tab[i].queue_list);
+ INIT_LIST_HEAD(&s->tab[i].entry_list);
atomic_set(&s->tab[i].refcnt, 0);
}
return s;
@@ -510,7 +669,7 @@ void t4_cleanup_sched(struct adapter *adap)
e = &s->tab[i];
if (e->state == SCHED_STATE_ACTIVE)
- t4_sched_class_free(pi, e);
+ t4_sched_class_free(adap->port[j], e);
}
kvfree(s);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 168fb4ce3759..e92ff68bdd0a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -52,10 +52,12 @@ enum {
enum sched_fw_ops {
SCHED_FW_OP_ADD,
+ SCHED_FW_OP_DEL,
};
enum sched_bind_type {
SCHED_QUEUE,
+ SCHED_FLOWC,
};
struct sched_queue_entry {
@@ -64,11 +66,17 @@ struct sched_queue_entry {
struct ch_sched_queue param;
};
+struct sched_flowc_entry {
+ struct list_head list;
+ struct ch_sched_flowc param;
+};
+
struct sched_class {
u8 state;
u8 idx;
struct ch_sched_params info;
- struct list_head queue_list;
+ enum sched_bind_type bind_type;
+ struct list_head entry_list;
atomic_t refcnt;
};
@@ -102,6 +110,7 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
struct ch_sched_params *p);
+void cxgb4_sched_class_free(struct net_device *dev, u8 classid);
struct sched_table *t4_init_sched(unsigned int size);
void t4_cleanup_sched(struct adapter *adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 928bfea5457b..a0400b9a11e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -55,6 +55,8 @@
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
#include "cxgb4_uld.h"
+#include "cxgb4_tc_mqprio.h"
+#include "sched.h"
/*
* Rx buffer size. We use largish buffers if possible but settle for single
@@ -269,7 +271,6 @@ out_err:
}
EXPORT_SYMBOL(cxgb4_map_skb);
-#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
const dma_addr_t *addr)
{
@@ -284,6 +285,7 @@ static void unmap_skb(struct device *dev, const struct sk_buff *skb,
dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
+#ifdef CONFIG_NEED_DMA_MAP_STATE
/**
* deferred_unmap_destructor - unmap a packet when it is freed
* @skb: the packet
@@ -1309,6 +1311,35 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
}
+static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
+ struct cpl_tx_pkt_lso_core *lso)
+{
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ int l3hdr_len = skb_network_header_len(skb);
+ const struct skb_shared_info *ssi;
+ bool ipv6 = false;
+
+ ssi = skb_shinfo(skb);
+ if (ssi->gso_type & SKB_GSO_TCPV6)
+ ipv6 = true;
+
+ lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(ipv6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->ipid_ofst = htons(0);
+ lso->mss = htons(ssi->gso_size);
+ lso->seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->len = htonl(skb->len);
+ else
+ lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
+
+ return (void *)(lso + 1);
+}
+
/**
* t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
* @adap: the adapter
@@ -1347,6 +1378,31 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
return reclaimed;
}
+static inline int cxgb4_validate_skb(struct sk_buff *skb,
+ struct net_device *dev,
+ u32 min_pkt_len)
+{
+ u32 max_pkt_len;
+
+ /* The chip min packet length is 10 octets, but some firmware
+ * commands have a minimum packet length requirement. So, play
+ * it safe and reject anything shorter than @min_pkt_len.
+ */
+ if (unlikely(skb->len < min_pkt_len))
+ return -EINVAL;
+
+ /* Discard the packet if the length is greater than mtu */
+ max_pkt_len = ETH_HLEN + dev->mtu;
+
+ if (skb_vlan_tagged(skb))
+ max_pkt_len += VLAN_HLEN;
+
+ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
@@ -1356,41 +1412,24 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
*/
static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- u32 wr_mid, ctrl0, op;
- u64 cntrl, *end, *sgl;
- int qidx, credits;
- unsigned int flits, ndesc;
- struct adapter *adap;
- struct sge_eth_txq *q;
- const struct port_info *pi;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+ bool ptp_enabled = is_ptp_enabled(skb, dev);
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ const struct skb_shared_info *ssi;
struct fw_eth_tx_pkt_wr *wr;
struct cpl_tx_pkt_core *cpl;
- const struct skb_shared_info *ssi;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ int len, qidx, credits, ret;
+ const struct port_info *pi;
+ unsigned int flits, ndesc;
bool immediate = false;
- int len, max_pkt_len;
- bool ptp_enabled = is_ptp_enabled(skb, dev);
+ u32 wr_mid, ctrl0, op;
+ u64 cntrl, *end, *sgl;
+ struct sge_eth_txq *q;
unsigned int chip_ver;
- enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
-
-#ifdef CONFIG_CHELSIO_T4_FCOE
- int err;
-#endif /* CONFIG_CHELSIO_T4_FCOE */
-
- /*
- * The chip min packet length is 10 octets but play safe and reject
- * anything shorter than an Ethernet header.
- */
- if (unlikely(skb->len < ETH_HLEN)) {
-out_free: dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ struct adapter *adap;
- /* Discard the packet if the length is greater than mtu */
- max_pkt_len = ETH_HLEN + dev->mtu;
- if (skb_vlan_tagged(skb))
- max_pkt_len += VLAN_HLEN;
- if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
+ if (ret)
goto out_free;
pi = netdev_priv(dev);
@@ -1421,8 +1460,8 @@ out_free: dev_kfree_skb_any(skb);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
- err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
- if (unlikely(err == -ENOTSUPP)) {
+ ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
+ if (unlikely(ret == -ENOTSUPP)) {
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
goto out_free;
@@ -1490,9 +1529,6 @@ out_free: dev_kfree_skb_any(skb);
len += sizeof(*cpl);
if (ssi->gso_size) {
struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
- bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
- int l3hdr_len = skb_network_header_len(skb);
- int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
if (tnl_type)
@@ -1519,30 +1555,8 @@ out_free: dev_kfree_skb_any(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL)
cntrl = hwcsum(adap->params.chip, skb);
} else {
- lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
- LSO_IPV6_V(v6) |
- LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
- LSO_IPHDR_LEN_V(l3hdr_len / 4) |
- LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
- lso->ipid_ofst = htons(0);
- lso->mss = htons(ssi->gso_size);
- lso->seqno_offset = htonl(0);
- if (is_t4(adap->params.chip))
- lso->len = htonl(skb->len);
- else
- lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
- cpl = (void *)(lso + 1);
-
- if (CHELSIO_CHIP_VERSION(adap->params.chip)
- <= CHELSIO_T5)
- cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
- else
- cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
-
- cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
- TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN_V(l3hdr_len);
+ cpl = write_tso_wr(adap, skb, lso);
+ cntrl = hwcsum(adap->params.chip, skb);
}
sgl = (u64 *)(cpl + 1); /* sgl start here */
if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
@@ -1622,6 +1636,10 @@ out_free: dev_kfree_skb_any(skb);
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
+
+out_free:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
/* Constants ... */
@@ -1710,32 +1728,25 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
dma_addr_t addr[MAX_SKB_FRAGS + 1];
const struct skb_shared_info *ssi;
struct fw_eth_tx_pkt_vm_wr *wr;
- int qidx, credits, max_pkt_len;
struct cpl_tx_pkt_core *cpl;
const struct port_info *pi;
unsigned int flits, ndesc;
struct sge_eth_txq *txq;
struct adapter *adapter;
+ int qidx, credits, ret;
+ size_t fw_hdr_copy_len;
u64 cntrl, *end;
u32 wr_mid;
- const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
- sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) +
- sizeof(wr->vlantci);
/* The chip minimum packet length is 10 octets but the firmware
* command that we are using requires that we copy the Ethernet header
* (including the VLAN tag) into the header so we reject anything
* smaller than that ...
*/
- if (unlikely(skb->len < fw_hdr_copy_len))
- goto out_free;
-
- /* Discard the packet if the length is greater than mtu */
- max_pkt_len = ETH_HLEN + dev->mtu;
- if (skb_vlan_tag_present(skb))
- max_pkt_len += VLAN_HLEN;
- if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) + sizeof(wr->vlantci);
+ ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
+ if (ret)
goto out_free;
/* Figure out which TX Queue we're going to use. */
@@ -1991,34 +2002,451 @@ out_free:
return NETDEV_TX_OK;
}
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of cxgb4_reclaim_completed_tx() that is used
+ * for Tx queues that send only immediate data (presently just
+ * the control queues) and thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
+ int reclaim = hw_cidx - q->cidx;
+
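+ /* The hardware cidx may have wrapped past our cidx; e.g. with
+ * size = 1024, cidx = 1020 and hw_cidx = 4, reclaim starts at
+ * -1016 and is corrected below to -1016 + 1024 = 8 descriptors.
+ */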
+ if (reclaim < 0)
+ reclaim += q->size;
+
+ q->in_use -= reclaim;
+ q->cidx = hw_cidx;
+}
+
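+/* Advance a circular EOSW Tx queue index by @n, wrapping at @max.
+ * For instance, with max = 128 descriptors, advancing idx 126 by
+ * n = 4 wraps around to 2.
+ */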
+static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
+{
+ u32 val = *idx + n;
+
+ if (val >= max)
+ val -= max;
+
+ *idx = val;
+}
+
+void cxgb4_eosw_txq_free_desc(struct adapter *adap,
+ struct sge_eosw_txq *eosw_txq, u32 ndesc)
+{
+ struct sge_eosw_desc *d;
+
+ d = &eosw_txq->desc[eosw_txq->last_cidx];
+ while (ndesc--) {
+ if (d->skb) {
+ if (d->addr[0]) {
+ unmap_skb(adap->pdev_dev, d->skb, d->addr);
+ memset(d->addr, 0, sizeof(d->addr));
+ }
+ dev_consume_skb_any(d->skb);
+ d->skb = NULL;
+ }
+ eosw_txq_advance_index(&eosw_txq->last_cidx, 1,
+ eosw_txq->ndesc);
+ d = &eosw_txq->desc[eosw_txq->last_cidx];
+ }
+}
+
+static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n)
+{
+ eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc);
+ eosw_txq->inuse += n;
+}
+
+static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq,
+ struct sk_buff *skb)
+{
+ if (eosw_txq->inuse == eosw_txq->ndesc)
+ return -ENOMEM;
+
+ eosw_txq->desc[eosw_txq->pidx].skb = skb;
+ return 0;
+}
+
+static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq)
+{
+ return eosw_txq->desc[eosw_txq->last_pidx].skb;
+}
+
+static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
+ struct sk_buff *skb, u32 hdr_len)
+{
+ u8 flits, nsgl = 0;
+ u32 wrlen;
+
+ wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core);
+ if (skb_shinfo(skb)->gso_size)
+ wrlen += sizeof(struct cpl_tx_pkt_lso_core);
+
+ wrlen += roundup(hdr_len, 16);
+
+ /* Packet headers + WR + CPLs */
+ flits = DIV_ROUND_UP(wrlen, 8);
+
+ if (skb_shinfo(skb)->nr_frags > 0)
+ nsgl = sgl_len(skb_shinfo(skb)->nr_frags);
+ else if (skb->len - hdr_len)
+ nsgl = sgl_len(1);
+
+ return flits + nsgl;
+}
+
+static inline void *write_eo_wr(struct adapter *adap,
+ struct sge_eosw_txq *eosw_txq,
+ struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
+ u32 hdr_len, u32 wrlen)
+{
+ const struct skb_shared_info *ssi = skb_shinfo(skb);
+ struct cpl_tx_pkt_core *cpl;
+ u32 immd_len, wrlen16;
+ bool compl = false;
+
+ wrlen16 = DIV_ROUND_UP(wrlen, 16);
+ immd_len = sizeof(struct cpl_tx_pkt_core);
+ if (skb_shinfo(skb)->gso_size) {
+ if (skb->encapsulation &&
+ CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+ immd_len += sizeof(struct cpl_tx_tnl_lso);
+ else
+ immd_len += sizeof(struct cpl_tx_pkt_lso_core);
+ }
+ immd_len += hdr_len;
+
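+ /* Request a firmware completion (FW_WR_COMPL) on the first WR,
+ * and then again whenever half of the offload queue's WR
+ * credits have been consumed since the last completion.
+ */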
+ if (!eosw_txq->ncompl ||
+ eosw_txq->last_compl >= adap->params.ofldq_wr_cred / 2) {
+ compl = true;
+ eosw_txq->ncompl++;
+ eosw_txq->last_compl = 0;
+ }
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
+ FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) |
+ FW_WR_COMPL_V(compl));
+ wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) |
+ FW_WR_FLOWID_V(eosw_txq->hwtid));
+ wr->r3 = 0;
+ wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
+ wr->u.tcpseg.ethlen = skb_network_offset(skb);
+ wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
+ wr->u.tcpseg.tcplen = tcp_hdrlen(skb);
+ wr->u.tcpseg.tsclk_tsoff = 0;
+ wr->u.tcpseg.r4 = 0;
+ wr->u.tcpseg.r5 = 0;
+ wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len);
+
+ if (ssi->gso_size) {
+ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+
+ wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size);
+ cpl = write_tso_wr(adap, skb, lso);
+ } else {
+ wr->u.tcpseg.mss = cpu_to_be16(0xffff);
+ cpl = (void *)(wr + 1);
+ }
+
+ eosw_txq->cred -= wrlen16;
+ eosw_txq->last_compl += wrlen16;
+ return cpl;
+}
+
+static void ethofld_hard_xmit(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ u32 wrlen, wrlen16, hdr_len, data_len;
+ enum sge_eosw_state next_state;
+ u64 cntrl, *start, *end, *sgl;
+ struct sge_eohw_txq *eohw_txq;
+ struct cpl_tx_pkt_core *cpl;
+ struct fw_eth_tx_eo_wr *wr;
+ bool skip_eotx_wr = false;
+ struct sge_eosw_desc *d;
+ struct sk_buff *skb;
+ u8 flits, ndesc;
+ int left;
+
+ eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
+ spin_lock(&eohw_txq->lock);
+ reclaim_completed_tx_imm(&eohw_txq->q);
+
+ d = &eosw_txq->desc[eosw_txq->last_pidx];
+ skb = d->skb;
+ skb_tx_timestamp(skb);
+
+ wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
+ if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
+ eosw_txq->last_pidx == eosw_txq->flowc_idx)) {
+ hdr_len = skb->len;
+ data_len = 0;
+ flits = DIV_ROUND_UP(hdr_len, 8);
+ if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND)
+ next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY;
+ else
+ next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY;
+ skip_eotx_wr = true;
+ } else {
+ hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb));
+ data_len = skb->len - hdr_len;
+ flits = ethofld_calc_tx_flits(adap, skb, hdr_len);
+ }
+ ndesc = flits_to_desc(flits);
+ wrlen = flits * 8;
+ wrlen16 = DIV_ROUND_UP(wrlen, 16);
+
+ /* If there are not enough credits, wait for credits
+ * to come back and then retry.
+ */
+ if (unlikely(wrlen16 > eosw_txq->cred))
+ goto out_unlock;
+
+ if (unlikely(skip_eotx_wr)) {
+ start = (u64 *)wr;
+ eosw_txq->state = next_state;
+ goto write_wr_headers;
+ }
+
+ cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen);
+ cntrl = hwcsum(adap->params.chip, skb);
+ if (skb_vlan_tag_present(skb))
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf));
+ cpl->pack = 0;
+ cpl->len = cpu_to_be16(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ start = (u64 *)(cpl + 1);
+
+write_wr_headers:
+ sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
+ hdr_len);
+ if (data_len) {
+ if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, d->addr))) {
+ memset(d->addr, 0, sizeof(d->addr));
+ eohw_txq->mapping_err++;
+ goto out_unlock;
+ }
+
+ end = (u64 *)wr + flits;
+ if (unlikely(start > sgl)) {
+ left = (u8 *)end - (u8 *)eohw_txq->q.stat;
+ end = (void *)eohw_txq->q.desc + left;
+ }
+
+ if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) {
+ /* If the current position is already at the end of the
+ * txq, wrap it back to the start of the queue and update
+ * the end pointer as well.
+ */
+ left = (u8 *)end - (u8 *)eohw_txq->q.stat;
+
+ end = (void *)eohw_txq->q.desc + left;
+ sgl = (void *)eohw_txq->q.desc;
+ }
+
+ cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len,
+ d->addr);
+ }
+
+ txq_advance(&eohw_txq->q, ndesc);
+ cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
+ eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
+
+out_unlock:
+ spin_unlock(&eohw_txq->lock);
+}
+
+static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
+{
+ struct sk_buff *skb;
+ int pktcount;
+
+ switch (eosw_txq->state) {
+ case CXGB4_EO_STATE_ACTIVE:
+ case CXGB4_EO_STATE_FLOWC_OPEN_SEND:
+ case CXGB4_EO_STATE_FLOWC_CLOSE_SEND:
+ pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
+ if (pktcount < 0)
+ pktcount += eosw_txq->ndesc;
+ break;
+ case CXGB4_EO_STATE_FLOWC_OPEN_REPLY:
+ case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY:
+ case CXGB4_EO_STATE_CLOSED:
+ default:
+ return;
+ }
+
+ while (pktcount--) {
+ skb = eosw_txq_peek(eosw_txq);
+ if (!skb) {
+ eosw_txq_advance_index(&eosw_txq->last_pidx, 1,
+ eosw_txq->ndesc);
+ continue;
+ }
+
+ ethofld_hard_xmit(dev, eosw_txq);
+ }
+}
+
+static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_eosw_txq *eosw_txq;
+ u32 qid;
+ int ret;
+
+ ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
+ if (ret)
+ goto out_free;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
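+ /* EOSW Tx queues sit after the port's default NIC queues in
+ * the netdev queue map; e.g. with nqsets = 8, netdev queue 10
+ * maps to EOSW Tx queue 2.
+ */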
+ qid = skb_get_queue_mapping(skb) - pi->nqsets;
+ eosw_txq = &tc_port_mqprio->eosw_txq[qid];
+ spin_lock_bh(&eosw_txq->lock);
+ if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
+ goto out_unlock;
+
+ ret = eosw_txq_enqueue(eosw_txq, skb);
+ if (ret)
+ goto out_unlock;
+
+ /* The SKB is queued for processing until credits are
+ * available. So, call the destructor now; the skb itself is
+ * freed later, after it has been successfully transmitted.
+ */
+ skb_orphan(skb);
+
+ eosw_txq_advance(eosw_txq, 1);
+ ethofld_xmit(dev, eosw_txq);
+ spin_unlock_bh(&eosw_txq->lock);
+ return NETDEV_TX_OK;
+
+out_unlock:
+ spin_unlock_bh(&eosw_txq->lock);
+out_free:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
+ u16 qid = skb_get_queue_mapping(skb);
if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
return cxgb4_vf_eth_xmit(skb, dev);
+ if (unlikely(qid >= pi->nqsets))
+ return cxgb4_ethofld_xmit(skb, dev);
+
return cxgb4_eth_xmit(skb, dev);
}
/**
- * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
- * @q: the SGE control Tx queue
+ * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
+ * @dev: netdevice
+ * @eotid: ETHOFLD tid to bind/unbind
+ * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
*
- * This is a variant of cxgb4_reclaim_completed_tx() that is used
- * for Tx queues that send only immediate data (presently just
- * the control queues) and thus do not have any sk_buffs to release.
+ * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
+ * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
+ * a traffic class.
*/
-static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
{
- int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
- int reclaim = hw_cidx - q->cidx;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ enum sge_eosw_state next_state;
+ struct sge_eosw_txq *eosw_txq;
+ u32 len, len16, nparams = 6;
+ struct fw_flowc_wr *flowc;
+ struct eotid_entry *entry;
+ struct sge_ofld_rxq *rxq;
+ struct sk_buff *skb;
+ int ret = 0;
- if (reclaim < 0)
- reclaim += q->size;
+ len = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval) * nparams;
+ len16 = DIV_ROUND_UP(len, 16);
- q->in_use -= reclaim;
- q->cidx = hw_cidx;
+ entry = cxgb4_lookup_eotid(&adap->tids, eotid);
+ if (!entry)
+ return -ENOMEM;
+
+ eosw_txq = (struct sge_eosw_txq *)entry->data;
+ if (!eosw_txq)
+ return -ENOMEM;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ spin_lock_bh(&eosw_txq->lock);
+ if (tc != FW_SCHED_CLS_NONE) {
+ if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
+ goto out_unlock;
+
+ next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
+ } else {
+ if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
+ goto out_unlock;
+
+ next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
+ }
+
+ flowc = __skb_put(skb, len);
+ memset(flowc, 0, len);
+
+ rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid];
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) |
+ FW_WR_FLOWID_V(eosw_txq->hwtid));
+ flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+ FW_FLOWC_WR_NPARAMS_V(nparams) |
+ FW_WR_COMPL_V(1));
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
+ flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf));
+ flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
+ flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan);
+ flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
+ flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan);
+ flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
+ flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id);
+ flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
+ flowc->mnemval[4].val = cpu_to_be32(tc);
+ flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE;
+ flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ?
+ FW_FLOWC_MNEM_EOSTATE_CLOSING :
+ FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
+
+ eosw_txq->cred -= len16;
+ eosw_txq->ncompl++;
+ eosw_txq->last_compl = 0;
+
+ ret = eosw_txq_enqueue(eosw_txq, skb);
+ if (ret) {
+ dev_consume_skb_any(skb);
+ goto out_unlock;
+ }
+
+ eosw_txq->state = next_state;
+ eosw_txq->flowc_idx = eosw_txq->pidx;
+ eosw_txq_advance(eosw_txq, 1);
+ ethofld_xmit(dev, eosw_txq);
+
+out_unlock:
+ spin_unlock_bh(&eosw_txq->lock);
+ return ret;
}
/**
@@ -3311,6 +3739,112 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
return work_done;
}
+void cxgb4_ethofld_restart(unsigned long data)
+{
+ struct sge_eosw_txq *eosw_txq = (struct sge_eosw_txq *)data;
+ int pktcount;
+
+ spin_lock(&eosw_txq->lock);
+ pktcount = eosw_txq->cidx - eosw_txq->last_cidx;
+ if (pktcount < 0)
+ pktcount += eosw_txq->ndesc;
+
+ if (pktcount) {
+ cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev),
+ eosw_txq, pktcount);
+ eosw_txq->inuse -= pktcount;
+ }
+
+ /* There may be some packets waiting for completions. So,
+ * attempt to send these packets now.
+ */
+ ethofld_xmit(eosw_txq->netdev, eosw_txq);
+ spin_unlock(&eosw_txq->lock);
+}
+
+/**
+ * cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the CPL message
+ * @si: the gather list of packet fragments
+ *
+ * Process an ETHOFLD Tx completion. Increment the cidx here, but
+ * free up the descriptors in a tasklet later.
+ */
+int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *si)
+{
+ u8 opcode = ((const struct rss_header *)rsp)->opcode;
+
+ /* skip RSS header */
+ rsp++;
+
+ if (opcode == CPL_FW4_ACK) {
+ const struct cpl_fw4_ack *cpl;
+ struct sge_eosw_txq *eosw_txq;
+ struct eotid_entry *entry;
+ struct sk_buff *skb;
+ u32 hdr_len, eotid;
+ u8 flits, wrlen16;
+ int credits;
+
+ cpl = (const struct cpl_fw4_ack *)rsp;
+ eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) -
+ q->adap->tids.eotid_base;
+ entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
+ if (!entry)
+ goto out_done;
+
+ eosw_txq = (struct sge_eosw_txq *)entry->data;
+ if (!eosw_txq)
+ goto out_done;
+
+ spin_lock(&eosw_txq->lock);
+ credits = cpl->credits;
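+ /* Walk the completed descriptors, charging each WR's length
+ * (in 16-byte units) against the credits returned by firmware.
+ */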
+ while (credits > 0) {
+ skb = eosw_txq->desc[eosw_txq->cidx].skb;
+ if (!skb)
+ break;
+
+ if (unlikely((eosw_txq->state ==
+ CXGB4_EO_STATE_FLOWC_OPEN_REPLY ||
+ eosw_txq->state ==
+ CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) &&
+ eosw_txq->cidx == eosw_txq->flowc_idx)) {
+ flits = DIV_ROUND_UP(skb->len, 8);
+ if (eosw_txq->state ==
+ CXGB4_EO_STATE_FLOWC_OPEN_REPLY)
+ eosw_txq->state = CXGB4_EO_STATE_ACTIVE;
+ else
+ eosw_txq->state = CXGB4_EO_STATE_CLOSED;
+ complete(&eosw_txq->completion);
+ } else {
+ hdr_len = eth_get_headlen(eosw_txq->netdev,
+ skb->data,
+ skb_headlen(skb));
+ flits = ethofld_calc_tx_flits(q->adap, skb,
+ hdr_len);
+ }
+ eosw_txq_advance_index(&eosw_txq->cidx, 1,
+ eosw_txq->ndesc);
+ wrlen16 = DIV_ROUND_UP(flits * 8, 16);
+ credits -= wrlen16;
+ }
+
+ eosw_txq->cred += cpl->credits;
+ eosw_txq->ncompl--;
+
+ spin_unlock(&eosw_txq->lock);
+
+ /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx,
+ * if there were packets waiting for completion.
+ */
+ tasklet_schedule(&eosw_txq->qresume_tsk);
+ }
+
+out_done:
+ return 0;
+}
+
/*
* The MSI-X interrupt handler for an SGE response queue.
*/
@@ -3912,30 +4446,30 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
}
-int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
- struct net_device *dev, unsigned int iqid,
- unsigned int uld_type)
+static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
+ struct net_device *dev, u32 cmd, u32 iqid)
{
unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
- int ret, nentries;
- struct fw_eq_ofld_cmd c;
- struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
- int cmd = FW_EQ_OFLD_CMD;
+ struct sge *s = &adap->sge;
+ struct fw_eq_ofld_cmd c;
+ u32 fb_min, nentries;
+ int ret;
/* Add status entries */
- nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
-
- txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
- sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
- NUMA_NO_NODE);
- if (!txq->q.desc)
+ nentries = q->size + s->stat_len / sizeof(struct tx_desc);
+ q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
+ sizeof(struct tx_sw_desc), &q->phys_addr,
+ &q->sdesc, s->stat_len, NUMA_NO_NODE);
+ if (!q->desc)
return -ENOMEM;
+ if (chip_ver <= CHELSIO_T5)
+ fb_min = FETCHBURSTMIN_64B_X;
+ else
+ fb_min = FETCHBURSTMIN_64B_T6_X;
+
memset(&c, 0, sizeof(c));
- if (unlikely(uld_type == CXGB4_TX_CRYPTO))
- cmd = FW_EQ_CTRL_CMD;
c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
@@ -3947,27 +4481,42 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
c.dcaen_to_eqsize =
- htonl(FW_EQ_OFLD_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
- ? FETCHBURSTMIN_64B_X
- : FETCHBURSTMIN_64B_T6_X) |
+ htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) |
FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
- c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+ c.eqaddr = cpu_to_be64(q->phys_addr);
ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
- kfree(txq->q.sdesc);
- txq->q.sdesc = NULL;
+ kfree(q->sdesc);
+ q->sdesc = NULL;
dma_free_coherent(adap->pdev_dev,
nentries * sizeof(struct tx_desc),
- txq->q.desc, txq->q.phys_addr);
- txq->q.desc = NULL;
+ q->desc, q->phys_addr);
+ q->desc = NULL;
return ret;
}
+ init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
+ return 0;
+}
+
+int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int uld_type)
+{
+ u32 cmd = FW_EQ_OFLD_CMD;
+ int ret;
+
+ if (unlikely(uld_type == CXGB4_TX_CRYPTO))
+ cmd = FW_EQ_CTRL_CMD;
+
+ ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
+ if (ret)
+ return ret;
+
txq->q.q_type = CXGB4_TXQ_ULD;
- init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
@@ -3976,6 +4525,25 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
return 0;
}
+int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
+ struct net_device *dev, u32 iqid)
+{
+ int ret;
+
+ ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
+ if (ret)
+ return ret;
+
+ txq->q.q_type = CXGB4_TXQ_ULD;
+ spin_lock_init(&txq->lock);
+ txq->adap = adap;
+ txq->tso = 0;
+ txq->tx_cso = 0;
+ txq->vlan_ins = 0;
+ txq->mapping_err = 0;
+ return 0;
+}
+
void free_txq(struct adapter *adap, struct sge_txq *q)
{
struct sge *s = &adap->sge;
@@ -4031,6 +4599,17 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
q->fl.size ? &q->fl : NULL);
}
+void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
+{
+ if (txq->q.desc) {
+ t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
+ txq->q.cntxt_id);
+ free_tx_desc(adap, &txq->q, txq->q.in_use, false);
+ kfree(txq->q.sdesc);
+ free_txq(adap, &txq->q);
+ }
+}
+
/**
* t4_free_sge_resources - free SGE resources
* @adap: the adapter
@@ -4060,6 +4639,10 @@ void t4_free_sge_resources(struct adapter *adap)
if (eq->rspq.desc)
free_rspq_fl(adap, &eq->rspq,
eq->fl.size ? &eq->fl : NULL);
+ if (eq->msix) {
+ cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
+ eq->msix = NULL;
+ }
etq = &adap->sge.ethtxq[i];
if (etq->q.desc) {
@@ -4086,8 +4669,15 @@ void t4_free_sge_resources(struct adapter *adap)
}
}
- if (adap->sge.fw_evtq.desc)
+ if (adap->sge.fw_evtq.desc) {
free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+ if (adap->sge.fwevtq_msix_idx >= 0)
+ cxgb4_free_msix_idx_in_bmap(adap,
+ adap->sge.fwevtq_msix_idx);
+ }
+
+ if (adap->sge.nd_msix_idx >= 0)
+ cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);
if (adap->sge.intrq.desc)
free_rspq_fl(adap, &adap->sge.intrq, NULL);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index f2a7824da42b..19d18acfc9a6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -8777,8 +8777,8 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
unsigned int *speedp, unsigned int *mtup)
{
unsigned int fw_caps = pi->adapter->params.fw_caps_support;
- struct fw_port_cmd port_cmd;
unsigned int action, link_ok, mtu;
+ struct fw_port_cmd port_cmd;
fw_port_cap32_t linkattr;
int ret;
@@ -8813,9 +8813,12 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
}
- *link_okp = link_ok;
- *speedp = fwcap_to_speed(linkattr);
- *mtup = mtu;
+ if (link_okp)
+ *link_okp = link_ok;
+ if (speedp)
+ *speedp = fwcap_to_speed(linkattr);
+ if (mtup)
+ *mtup = mtu;
return 0;
}
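The reworked t4_get_link_params() treats each out-parameter as optional, so a caller interested in a single value can pass NULL for the others. A minimal sketch of the guarded-store pattern (function name and values are illustrative, not the driver's):

#include <stdio.h>

static int get_link_params(unsigned int *speed, unsigned int *mtu)
{
	if (speed)
		*speed = 1000;	/* store only through non-NULL pointers */
	if (mtu)
		*mtu = 1500;
	return 0;
}

int main(void)
{
	unsigned int speed;

	get_link_params(&speed, NULL);	/* caller ignores the MTU */
	printf("speed=%u\n", speed);
	return 0;
}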
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 38dd41eb959e..575c6abcdae7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -1421,6 +1421,11 @@ enum {
CPL_FW4_ACK_FLAGS_FLOWC = 0x4, /* fw_flowc_wr complete */
};
+#define CPL_FW4_ACK_FLOWID_S 0
+#define CPL_FW4_ACK_FLOWID_M 0xffffff
+#define CPL_FW4_ACK_FLOWID_G(x) \
+ (((x) >> CPL_FW4_ACK_FLOWID_S) & CPL_FW4_ACK_FLOWID_M)
+
struct cpl_fw6_msg {
u8 opcode;
u8 type;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 65313f6b5704..414e5cca293e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -87,6 +87,7 @@ enum fw_wr_opcodes {
FW_ULPTX_WR = 0x04,
FW_TP_WR = 0x05,
FW_ETH_TX_PKT_WR = 0x08,
+ FW_ETH_TX_EO_WR = 0x1c,
FW_OFLD_CONNECTION_WR = 0x2f,
FW_FLOWC_WR = 0x0a,
FW_OFLD_TX_DATA_WR = 0x0b,
@@ -534,6 +535,35 @@ struct fw_eth_tx_pkt_wr {
__be64 r3;
};
+enum fw_eth_tx_eo_type {
+ FW_ETH_TX_EO_TYPE_TCPSEG = 1,
+};
+
+struct fw_eth_tx_eo_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be64 r3;
+ union fw_eth_tx_eo {
+ struct fw_eth_tx_eo_tcpseg {
+ __u8 type;
+ __u8 ethlen;
+ __be16 iplen;
+ __u8 tcplen;
+ __u8 tsclk_tsoff;
+ __be16 r4;
+ __be16 mss;
+ __be16 r5;
+ __be32 plen;
+ } tcpseg;
+ } u;
+};
+
+#define FW_ETH_TX_EO_WR_IMMDLEN_S 0
+#define FW_ETH_TX_EO_WR_IMMDLEN_M 0x1ff
+#define FW_ETH_TX_EO_WR_IMMDLEN_V(x) ((x) << FW_ETH_TX_EO_WR_IMMDLEN_S)
+#define FW_ETH_TX_EO_WR_IMMDLEN_G(x) \
+ (((x) >> FW_ETH_TX_EO_WR_IMMDLEN_S) & FW_ETH_TX_EO_WR_IMMDLEN_M)
+
struct fw_ofld_connection_wr {
__be32 op_compl;
__be32 len16_pkd;
@@ -660,6 +690,12 @@ enum fw_flowc_mnem_tcpstate {
FW_FLOWC_MNEM_TCPSTATE_TIMEWAIT = 10, /* not expected */
};
+enum fw_flowc_mnem_eostate {
+ FW_FLOWC_MNEM_EOSTATE_ESTABLISHED = 1, /* default */
+ /* graceful close, after sending outstanding payload */
+ FW_FLOWC_MNEM_EOSTATE_CLOSING = 2,
+};
+
enum fw_flowc_mnem {
FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
FW_FLOWC_MNEM_CH,
@@ -1134,6 +1170,7 @@ enum fw_caps_config_nic {
FW_CAPS_CONFIG_NIC = 0x00000001,
FW_CAPS_CONFIG_NIC_VM = 0x00000002,
FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020,
+ FW_CAPS_CONFIG_NIC_ETHOFLD = 0x00000040,
};
enum fw_caps_config_ofld {
@@ -1276,6 +1313,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD = 0x28,
FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29,
FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
+ FW_PARAMS_PARAM_DEV_NUM_TM_CLASS = 0x2B,
FW_PARAMS_PARAM_DEV_FILTER = 0x2E,
};
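The new FW_ETH_TX_EO_WR_IMMDLEN_* definitions above follow the cxgb4 shift/mask naming convention: _S is the field shift, _M the mask, _V() inserts a value into its field and _G() extracts it back. A hedged standalone sketch of the round-trip (plain C, not kernel code; the macro names here are stand-ins):

#include <assert.h>
#include <stdint.h>

#define IMMDLEN_S 0
#define IMMDLEN_M 0x1ff
#define IMMDLEN_V(x) ((x) << IMMDLEN_S)
#define IMMDLEN_G(x) (((x) >> IMMDLEN_S) & IMMDLEN_M)

int main(void)
{
	uint32_t word = IMMDLEN_V(0x1abu) | (1u << 9); /* unrelated bit set */

	assert(IMMDLEN_G(word) == 0x1ab); /* _G() masks off unrelated bits */
	return 0;
}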
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index f1a0c4dceda0..f37c9a08c4cf 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -763,6 +763,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct ep93xx_priv *ep;
+ struct resource *mem;
dev = platform_get_drvdata(pdev);
if (dev == NULL)
@@ -778,8 +779,8 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
iounmap(ep->base_addr);
if (ep->res != NULL) {
- release_resource(ep->res);
- kfree(ep->res);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
}
free_netdev(dev);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index e736ce2c58ca..a8f4c69252ff 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2524,6 +2524,7 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
gemini_port_remove(port);
+ free_netdev(port->netdev);
return 0;
}
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index da0c506349d1..a6f2063f1475 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1612,7 +1612,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
struct platform_device *pdev = to_platform_device(priv->dev);
- int phy_intf = PHY_INTERFACE_MODE_RGMII;
+ phy_interface_t phy_intf = PHY_INTERFACE_MODE_RGMII;
struct device_node *np = pdev->dev.of_node;
int i, err = 0;
u32 reg;
@@ -1637,8 +1637,8 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
/* Get PHY mode from device-tree */
if (np) {
/* Default to RGMII. It's a gigabit part after all */
- phy_intf = of_get_phy_mode(np);
- if (phy_intf < 0)
+ err = of_get_phy_mode(np, &phy_intf);
+ if (err)
phy_intf = PHY_INTERFACE_MODE_RGMII;
/* Aspeed only supports these. I don't know about other IP
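Many hunks in this series convert callers from the old of_get_phy_mode() contract (the mode or a negative errno as the return value) to the new one (0/-errno return, mode written through a phy_interface_t pointer). A standalone mock of the new calling pattern; mock_of_get_phy_mode() and the enum are stand-ins, the real helper lives in drivers/of/of_net.c:

#include <stdio.h>

typedef enum { MODE_NA, MODE_MII, MODE_RGMII } phy_interface_t;

/* hypothetical stand-in for the real of_get_phy_mode() */
static int mock_of_get_phy_mode(const char *prop, phy_interface_t *mode)
{
	if (!prop)
		return -22;	/* -EINVAL: property missing */
	*mode = MODE_RGMII;
	return 0;
}

int main(void)
{
	phy_interface_t mode;

	/* new contract: 0 on success, negative errno on failure;
	 * on failure, fall back to a driver-specific default
	 */
	if (mock_of_get_phy_mode(NULL, &mode))
		mode = MODE_MII;
	printf("mode=%d\n", mode);
	return 0;
}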
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index fbef2829f3de..c6fb8e4021ac 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -2,6 +2,7 @@
config FSL_DPAA2_ETH
tristate "Freescale DPAA2 Ethernet"
depends on FSL_MC_BUS && FSL_MC_DPIO
+ select PHYLINK
help
This is the DPAA2 Ethernet driver supporting Freescale SoCs
with DPAA2 (DataPath Acceleration Architecture v2).
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index acc56606d3a5..7ff147e89426 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -2257,8 +2257,16 @@ err_set_cdan:
err_service_reg:
free_channel(priv, channel);
err_alloc_ch:
- if (err == -EPROBE_DEFER)
+ if (err == -EPROBE_DEFER) {
+ for (i = 0; i < priv->num_channels; i++) {
+ channel = priv->channel[i];
+ nctx = &channel->nctx;
+ dpaa2_io_service_deregister(channel->dpio, nctx, dev);
+ free_channel(priv, channel);
+ }
+ priv->num_channels = 0;
return err;
+ }
if (cpumask_empty(&priv->dpio_cpumask)) {
dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 0883620631b8..96676abcebd5 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -173,6 +173,7 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
+ struct dpaa2_eth_priv *priv = netdev_priv(netdev);
u8 *p = data;
int i;
@@ -186,15 +187,22 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ if (priv->mac)
+ dpaa2_mac_get_strings(p);
break;
}
}
static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
+ int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
switch (sset) {
case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
- return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
+ if (priv->mac)
+ num_ss_stats += dpaa2_mac_get_sset_count();
+ return num_ss_stats;
default:
return -EOPNOTSUPP;
}
@@ -293,6 +301,9 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
return;
}
*(data + i++) = buf_cnt;
+
+ if (priv->mac)
+ dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}
static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index fea388d86f20..84233e467ed1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -7,14 +7,19 @@
#define phylink_to_dpaa2_mac(config) \
container_of((config), struct dpaa2_mac, phylink_config)
-static phy_interface_t phy_mode(enum dpmac_eth_if eth_if)
+static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
{
+ *if_mode = PHY_INTERFACE_MODE_NA;
+
switch (eth_if) {
case DPMAC_ETH_IF_RGMII:
- return PHY_INTERFACE_MODE_RGMII;
+ *if_mode = PHY_INTERFACE_MODE_RGMII;
+ break;
default:
return -EINVAL;
}
+
+ return 0;
}
/* Caller must call of_node_put on the returned value */
@@ -44,17 +49,18 @@ static struct device_node *dpaa2_mac_get_node(u16 dpmac_id)
static int dpaa2_mac_get_if_mode(struct device_node *node,
struct dpmac_attr attr)
{
- int if_mode;
+ phy_interface_t if_mode;
+ int err;
- if_mode = of_get_phy_mode(node);
- if (if_mode >= 0)
+ err = of_get_phy_mode(node, &if_mode);
+ if (!err)
return if_mode;
- if_mode = phy_mode(attr.eth_if);
- if (if_mode >= 0)
+ err = phy_mode(attr.eth_if, &if_mode);
+ if (!err)
return if_mode;
- return -ENODEV;
+ return err;
}
static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
@@ -299,3 +305,71 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
phylink_destroy(mac->phylink);
dpmac_close(mac->mc_io, 0, mac->mc_dev->mc_handle);
}
+
+static char dpaa2_mac_ethtool_stats[][ETH_GSTRING_LEN] = {
+ [DPMAC_CNT_ING_ALL_FRAME] = "[mac] rx all frames",
+ [DPMAC_CNT_ING_GOOD_FRAME] = "[mac] rx frames ok",
+ [DPMAC_CNT_ING_ERR_FRAME] = "[mac] rx frame errors",
+ [DPMAC_CNT_ING_FRAME_DISCARD] = "[mac] rx frame discards",
+ [DPMAC_CNT_ING_UCAST_FRAME] = "[mac] rx u-cast",
+ [DPMAC_CNT_ING_BCAST_FRAME] = "[mac] rx b-cast",
+ [DPMAC_CNT_ING_MCAST_FRAME] = "[mac] rx m-cast",
+ [DPMAC_CNT_ING_FRAME_64] = "[mac] rx 64 bytes",
+ [DPMAC_CNT_ING_FRAME_127] = "[mac] rx 65-127 bytes",
+ [DPMAC_CNT_ING_FRAME_255] = "[mac] rx 128-255 bytes",
+ [DPMAC_CNT_ING_FRAME_511] = "[mac] rx 256-511 bytes",
+ [DPMAC_CNT_ING_FRAME_1023] = "[mac] rx 512-1023 bytes",
+ [DPMAC_CNT_ING_FRAME_1518] = "[mac] rx 1024-1518 bytes",
+ [DPMAC_CNT_ING_FRAME_1519_MAX] = "[mac] rx 1519-max bytes",
+ [DPMAC_CNT_ING_FRAG] = "[mac] rx frags",
+ [DPMAC_CNT_ING_JABBER] = "[mac] rx jabber",
+ [DPMAC_CNT_ING_ALIGN_ERR] = "[mac] rx align errors",
+ [DPMAC_CNT_ING_OVERSIZED] = "[mac] rx oversized",
+ [DPMAC_CNT_ING_VALID_PAUSE_FRAME] = "[mac] rx pause",
+ [DPMAC_CNT_ING_BYTE] = "[mac] rx bytes",
+ [DPMAC_CNT_EGR_GOOD_FRAME] = "[mac] tx frames ok",
+ [DPMAC_CNT_EGR_UCAST_FRAME] = "[mac] tx u-cast",
+ [DPMAC_CNT_EGR_MCAST_FRAME] = "[mac] tx m-cast",
+ [DPMAC_CNT_EGR_BCAST_FRAME] = "[mac] tx b-cast",
+ [DPMAC_CNT_EGR_ERR_FRAME] = "[mac] tx frame errors",
+ [DPMAC_CNT_EGR_UNDERSIZED] = "[mac] tx undersized",
+ [DPMAC_CNT_EGR_VALID_PAUSE_FRAME] = "[mac] tx b-pause",
+ [DPMAC_CNT_EGR_BYTE] = "[mac] tx bytes",
+};
+
+#define DPAA2_MAC_NUM_STATS ARRAY_SIZE(dpaa2_mac_ethtool_stats)
+
+int dpaa2_mac_get_sset_count(void)
+{
+ return DPAA2_MAC_NUM_STATS;
+}
+
+void dpaa2_mac_get_strings(u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
+ strlcpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ int i, err;
+ u64 value;
+
+ for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
+ err = dpmac_get_counter(mac->mc_io, 0, dpmac_dev->mc_handle,
+ i, &value);
+ if (err) {
+ netdev_err_once(mac->net_dev,
+ "dpmac_get_counter error %d\n", err);
+ *(data + i) = U64_MAX;
+ continue;
+ }
+ *(data + i) = value;
+ }
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 8634d0de7ef3..4da8079b9155 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -29,4 +29,10 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac);
void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
+int dpaa2_mac_get_sset_count(void);
+
+void dpaa2_mac_get_strings(u8 *data);
+
+void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data);
+
#endif /* DPAA2_MAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
index 96a9b0d0992e..3ea51dd9374b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
@@ -22,6 +22,8 @@
#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD_V2(0x0c3)
+#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
+
/* Macros for accessing command fields smaller than 1byte */
#define DPMAC_MASK(field) \
GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
@@ -59,4 +61,13 @@ struct dpmac_cmd_set_link_state {
__le64 advertising;
};
+struct dpmac_cmd_get_counter {
+ u8 id;
+};
+
+struct dpmac_rsp_get_counter {
+ u64 pad;
+ u64 counter;
+};
+
#endif /* _FSL_DPMAC_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.c b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
index b75189deffb1..d5997b654562 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
@@ -147,3 +147,37 @@ int dpmac_set_link_state(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+
+/**
+ * dpmac_get_counter() - Read a specific DPMAC counter
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @id: The requested counter ID
+ * @value: Returned counter value
+ *
+ * Return: '0' on success; error code otherwise.
+ */
+int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_counter_id id, u64 *value)
+{
+ struct dpmac_cmd_get_counter *dpmac_cmd;
+ struct dpmac_rsp_get_counter *dpmac_rsp;
+ struct fsl_mc_command cmd = { 0 };
+ int err = 0;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
+ cmd_flags,
+ token);
+ dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params;
+ dpmac_cmd->id = id;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params;
+ *value = le64_to_cpu(dpmac_rsp->counter);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
index 4efc410a479e..135f143097a5 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
@@ -141,4 +141,86 @@ int dpmac_set_link_state(struct fsl_mc_io *mc_io,
u16 token,
struct dpmac_link_state *link_state);
+/**
+ * enum dpmac_counter_id - DPMAC counter types
+ *
+ * @DPMAC_CNT_ING_FRAME_64: counts 64-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts frames of 1519 bytes and larger
+ * (up to the maximum frame length specified),
+ * good or bad.
+ * @DPMAC_CNT_ING_FRAG: counts received frames shorter than 64 bytes with a
+ * wrong CRC.
+ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
+ * specified, with a bad frame check sequence.
+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
+ * Occurs when a receive FIFO overflows.
+ * Includes also frames truncated as a result of
+ * the receive FIFO overflow.
+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
+ * (optionally used for wrong SFD).
+ * @DPMAC_CNT_EGR_UNDERSIZED: counts transmitted frames that were less than
+ * 64 bytes long with a good CRC.
+ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
+ * specified, with a good frame check sequence.
+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames received
+ * (regular and PFC).
+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
+ * (regular and PFC).
+ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
+ * frames and valid pause frames.
+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
+ * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad.
+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
+ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
+ * (except for undersized/fragment frame).
+ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
+ * frames and valid pause frames transmitted.
+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
+ * pause frames.
+ * @DPMAC_CNT_EGR_GOOD_FRAME: counts frames transmitted without error, including
+ * pause frames.
+ */
+enum dpmac_counter_id {
+ DPMAC_CNT_ING_FRAME_64,
+ DPMAC_CNT_ING_FRAME_127,
+ DPMAC_CNT_ING_FRAME_255,
+ DPMAC_CNT_ING_FRAME_511,
+ DPMAC_CNT_ING_FRAME_1023,
+ DPMAC_CNT_ING_FRAME_1518,
+ DPMAC_CNT_ING_FRAME_1519_MAX,
+ DPMAC_CNT_ING_FRAG,
+ DPMAC_CNT_ING_JABBER,
+ DPMAC_CNT_ING_FRAME_DISCARD,
+ DPMAC_CNT_ING_ALIGN_ERR,
+ DPMAC_CNT_EGR_UNDERSIZED,
+ DPMAC_CNT_ING_OVERSIZED,
+ DPMAC_CNT_ING_VALID_PAUSE_FRAME,
+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
+ DPMAC_CNT_ING_BYTE,
+ DPMAC_CNT_ING_MCAST_FRAME,
+ DPMAC_CNT_ING_BCAST_FRAME,
+ DPMAC_CNT_ING_ALL_FRAME,
+ DPMAC_CNT_ING_UCAST_FRAME,
+ DPMAC_CNT_ING_ERR_FRAME,
+ DPMAC_CNT_EGR_BYTE,
+ DPMAC_CNT_EGR_MCAST_FRAME,
+ DPMAC_CNT_EGR_BCAST_FRAME,
+ DPMAC_CNT_EGR_UCAST_FRAME,
+ DPMAC_CNT_EGR_ERR_FRAME,
+ DPMAC_CNT_ING_GOOD_FRAME,
+ DPMAC_CNT_EGR_GOOD_FRAME
+};
+
+int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_counter_id id, u64 *value);
+
#endif /* __FSL_DPMAC_H */
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index c219587bd334..491659fe3e35 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -50,3 +50,13 @@ config FSL_ENETC_HW_TIMESTAMPING
allocation has not been supported and it is too expensive to use
extended RX BDs if timestamping is not used, this option enables
extended RX BDs in order to support hardware timestamping.
+
+config FSL_ENETC_QOS
+ bool "ENETC hardware Time-Sensitive Networking support"
+ depends on (FSL_ENETC || FSL_ENETC_VF) && NET_SCH_TAPRIO
+ help
+ ENETC supports Time-Sensitive Networking (TSN) capabilities such as
+ 802.1Qbv, 802.1Qci and 802.1Qbu. These can be enabled or disabled
+ from user space via QoS commands (tc), handled on the kernel side by
+ the QoS driver. Currently only taprio (802.1Qbv) is supported.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index d200c27c3bf6..d0db33e5b6b7 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -5,9 +5,11 @@ common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs)
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
+fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o $(common-objs)
+fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o
fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index b6ff89307409..f6b00c68451b 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -742,9 +742,14 @@ void enetc_get_si_caps(struct enetc_si *si)
si->num_rss = 0;
val = enetc_rd(hw, ENETC_SIPCAPR0);
if (val & ENETC_SIPCAPR0_RSS) {
- val = enetc_rd(hw, ENETC_SIRSSCAPR);
- si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(val);
+ u32 rss;
+
+ rss = enetc_rd(hw, ENETC_SIRSSCAPR);
+ si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
}
+
+ if (val & ENETC_SIPCAPR0_QBV)
+ si->hw_features |= ENETC_SI_F_QBV;
}
static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
@@ -1314,8 +1319,12 @@ static void enetc_disable_interrupts(struct enetc_ndev_priv *priv)
static void adjust_link(struct net_device *ndev)
{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
+ if (priv->active_offloads & ENETC_F_QBV)
+ enetc_sched_speed_set(ndev);
+
phy_print_status(phydev);
}
@@ -1427,8 +1436,7 @@ int enetc_close(struct net_device *ndev)
return 0;
}
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_mqprio_qopt *mqprio = type_data;
@@ -1436,9 +1444,6 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
u8 num_tc;
int i;
- if (type != TC_SETUP_QDISC_MQPRIO)
- return -EOPNOTSUPP;
-
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
num_tc = mqprio->num_tc;
@@ -1483,6 +1488,19 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
return 0;
}
+int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return enetc_setup_tc_taprio(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -1599,7 +1617,10 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
if (cmd == SIOCGHWTSTAMP)
return enetc_hwtstamp_get(ndev, rq);
#endif
- return -EINVAL;
+
+ if (!ndev->phydev)
+ return -EOPNOTSUPP;
+ return phy_mii_ioctl(ndev->phydev, rq, cmd);
}
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 541b4e2073fe..89f23156f330 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -118,6 +118,8 @@ enum enetc_errata {
ENETC_ERR_UCMCSWP = BIT(2),
};
+#define ENETC_SI_F_QBV BIT(0)
+
/* PCI IEP device data */
struct enetc_si {
struct pci_dev *pdev;
@@ -133,6 +135,7 @@ struct enetc_si {
int num_fs_entries;
int num_rss; /* number of RSS buckets */
unsigned short pad;
+ int hw_features;
};
#define ENETC_SI_ALIGN 32
@@ -173,6 +176,7 @@ struct enetc_cls_rule {
enum enetc_active_offloads {
ENETC_F_RX_TSTAMP = BIT(0),
ENETC_F_TX_TSTAMP = BIT(1),
+ ENETC_F_QBV = BIT(2),
};
struct enetc_ndev_priv {
@@ -188,6 +192,8 @@ struct enetc_ndev_priv {
u16 msg_enable;
int active_offloads;
+ u32 speed; /* cached PHY speed, used to detect PSPEED changes */
+
struct enetc_bdr *tx_ring[16];
struct enetc_bdr *rx_ring[16];
@@ -244,3 +250,12 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
+int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
+
+#ifdef CONFIG_FSL_ENETC_QOS
+int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
+void enetc_sched_speed_set(struct net_device *ndev);
+#else
+#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
+#define enetc_sched_speed_set(ndev) (void)0
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index de466b71bf8f..201cbc362e33 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -32,7 +32,7 @@ static int enetc_cbd_unused(struct enetc_cbdr *r)
r->bd_count;
}
-static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
+int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
{
struct enetc_cbdr *ring = &si->cbd_ring;
int timeout = ENETC_CBDR_TIMEOUT;
@@ -66,6 +66,9 @@ static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
if (!timeout)
return -EBUSY;
+ /* The CBD may write data back; copy it so the caller sees the response */
+ *cbd = *dest_cbd;
+
enetc_clean_cbdr(si);
return 0;
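The "*cbd = *dest_cbd" writeback added above copies the ring entry back into the caller's descriptor, so response fields filled in by hardware become visible to upper layers. A simplified standalone mock of that round-trip (types and the completion step are stand-ins):

#include <stdio.h>

struct mock_cbd { int cmd; int status; };

static void ring_execute(struct mock_cbd *ring_slot)
{
	ring_slot->status = 1;	/* hardware fills in the response */
}

static int send_cmd(struct mock_cbd *ring_slot, struct mock_cbd *cbd)
{
	*ring_slot = *cbd;	/* post the request */
	ring_execute(ring_slot); /* wait for completion (simplified) */
	*cbd = *ring_slot;	/* writeback: feed the response to the caller */
	return 0;
}

int main(void)
{
	struct mock_cbd ring_slot = {0}, cbd = { .cmd = 5 };

	send_cmd(&ring_slot, &cbd);
	printf("status=%d\n", cbd.status);
	return 0;
}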
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index fcb52efec075..880a8ed8bb47 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -584,6 +584,31 @@ static int enetc_get_ts_info(struct net_device *ndev,
return 0;
}
+static void enetc_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (dev->phydev)
+ phy_ethtool_get_wol(dev->phydev, wol);
+}
+
+static int enetc_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ if (!dev->phydev)
+ return -EOPNOTSUPP;
+
+ ret = phy_ethtool_set_wol(dev->phydev, wol);
+ if (!ret)
+ device_set_wakeup_enable(&dev->dev, wol->wolopts);
+
+ return ret;
+}
+
static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_regs_len = enetc_get_reglen,
.get_regs = enetc_get_regs,
@@ -601,6 +626,8 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_link = ethtool_op_get_link,
.get_ts_info = enetc_get_ts_info,
+ .get_wol = enetc_get_wol,
+ .set_wol = enetc_set_wol,
};
static const struct ethtool_ops enetc_vf_ethtool_ops = {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 88276299f447..924ddb6d358a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -18,6 +18,7 @@
#define ENETC_SICTR0 0x18
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
+#define ENETC_SIPCAPR0_QBV BIT(4)
#define ENETC_SIPCAPR0_RSS BIT(8)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
@@ -148,6 +149,11 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PORT_BASE 0x10000
#define ENETC_PMR 0x0000
#define ENETC_PMR_EN GENMASK(18, 16)
+#define ENETC_PMR_PSPEED_MASK GENMASK(11, 8)
+#define ENETC_PMR_PSPEED_10M 0
+#define ENETC_PMR_PSPEED_100M BIT(8)
+#define ENETC_PMR_PSPEED_1000M BIT(9)
+#define ENETC_PMR_PSPEED_2500M BIT(10)
#define ENETC_PSR 0x0004 /* RO */
#define ENETC_PSIPMR 0x0018
#define ENETC_PSIPMR_SET_UP(n) BIT(n) /* n = SI index */
@@ -440,22 +446,6 @@ union enetc_rx_bd {
#define EMETC_MAC_ADDR_FILT_RES 3 /* # of reserved entries at the beginning */
#define ENETC_MAX_NUM_VFS 2
-struct enetc_cbd {
- union {
- struct {
- __le32 addr[2];
- __le32 opt[4];
- };
- __le32 data[6];
- };
- __le16 index;
- __le16 length;
- u8 cmd;
- u8 cls;
- u8 _res;
- u8 status_flags;
-};
-
#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
#define ENETC_CBD_STATUS_MASK 0xf
@@ -554,3 +544,70 @@ static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx,
val |= ENETC_TBMR_SET_PRIO(prio);
enetc_txbdr_wr(hw, bdr_idx, ENETC_TBMR, val);
}
+
+enum bdcr_cmd_class {
+ BDCR_CMD_UNSPEC = 0,
+ BDCR_CMD_MAC_FILTER,
+ BDCR_CMD_VLAN_FILTER,
+ BDCR_CMD_RSS,
+ BDCR_CMD_RFS,
+ BDCR_CMD_PORT_GCL,
+ BDCR_CMD_RECV_CLASSIFIER,
+ __BDCR_CMD_MAX_LEN,
+ BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1,
+};
+
+/* class 5, command 0 */
+struct tgs_gcl_conf {
+ u8 atc; /* init gate value */
+ u8 res[7];
+ struct {
+ u8 res1[4];
+ __le16 acl_len;
+ u8 res2[2];
+ };
+};
+
+/* gate control list entry */
+struct gce {
+ __le32 period;
+ u8 gate;
+ u8 res[3];
+};
+
+/* the tgs_gcl_conf address points to this data space */
+struct tgs_gcl_data {
+ __le32 btl;
+ __le32 bth;
+ __le32 ct;
+ __le32 cte;
+ struct gce entry[0];
+};
+
+struct enetc_cbd {
+ union {
+ struct {
+ __le32 addr[2];
+ union {
+ __le32 opt[4];
+ struct tgs_gcl_conf gcl_conf;
+ };
+ }; /* Long format */
+ __le32 data[6];
+ };
+ __le16 index;
+ __le16 length;
+ u8 cmd;
+ u8 cls;
+ u8 _res;
+ u8 status_flags;
+};
+
+/* port time gating control register */
+#define ENETC_QBV_PTGCR_OFFSET 0x11a00
+#define ENETC_QBV_TGE BIT(31)
+#define ENETC_QBV_TGPE BIT(30)
+
+/* Port time gating capability register */
+#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08
+#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index b73421c3e25b..e7482d483b28 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -742,6 +742,9 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->priv_flags |= IFF_UNICAST_FLT;
+ if (si->hw_features & ENETC_SI_F_QBV)
+ priv->active_offloads |= ENETC_F_QBV;
+
/* pick up primary MAC address from SI */
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
@@ -784,8 +787,8 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
}
}
- priv->if_mode = of_get_phy_mode(np);
- if ((int)priv->if_mode < 0) {
+ err = of_get_phy_mode(np, &priv->if_mode);
+ if (err) {
dev_err(priv->dev, "missing phy type\n");
of_node_put(priv->phy_node);
if (of_phy_is_fixed_link(np))
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
new file mode 100644
index 000000000000..66a3da61ca16
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include "enetc.h"
+
+#include <net/pkt_sched.h>
+
+static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
+{
+ return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
+ & ENETC_QBV_MAX_GCL_LEN_MASK;
+}
+
+void enetc_sched_speed_set(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ u32 old_speed = priv->speed;
+ u32 speed, pspeed;
+
+ if (phydev->speed == old_speed)
+ return;
+
+ speed = phydev->speed;
+ switch (speed) {
+ case SPEED_1000:
+ pspeed = ENETC_PMR_PSPEED_1000M;
+ break;
+ case SPEED_2500:
+ pspeed = ENETC_PMR_PSPEED_2500M;
+ break;
+ case SPEED_100:
+ pspeed = ENETC_PMR_PSPEED_100M;
+ break;
+ case SPEED_10:
+ default:
+ pspeed = ENETC_PMR_PSPEED_10M;
+ netdev_err(ndev, "Qbv PSPEED: unsupported link speed, defaulting to 10M\n");
+ }
+
+ priv->speed = speed;
+ enetc_port_wr(&priv->si->hw, ENETC_PMR,
+ (enetc_port_rd(&priv->si->hw, ENETC_PMR)
+ & (~ENETC_PMR_PSPEED_MASK))
+ | pspeed);
+}
+
+static int enetc_setup_taprio(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *admin_conf)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_cbd cbd = {.cmd = 0};
+ struct tgs_gcl_conf *gcl_config;
+ struct tgs_gcl_data *gcl_data;
+ struct gce *gce;
+ dma_addr_t dma;
+ u16 data_size;
+ u16 gcl_len;
+ u32 tge;
+ int err;
+ int i;
+
+ if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw))
+ return -EINVAL;
+ gcl_len = admin_conf->num_entries;
+
+ tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
+ if (!admin_conf->enable) {
+ enetc_wr(&priv->si->hw,
+ ENETC_QBV_PTGCR_OFFSET,
+ tge & (~ENETC_QBV_TGE));
+ return 0;
+ }
+
+ if (admin_conf->cycle_time > U32_MAX ||
+ admin_conf->cycle_time_extension > U32_MAX)
+ return -EINVAL;
+
+ /* Configure the (administrative) gate control list using the
+ * control BD.
+ */
+ gcl_config = &cbd.gcl_conf;
+
+ data_size = struct_size(gcl_data, entry, gcl_len);
+ gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!gcl_data)
+ return -ENOMEM;
+
+ gce = (struct gce *)(gcl_data + 1);
+
+ /* Set all gates open as default */
+ gcl_config->atc = 0xff;
+ gcl_config->acl_len = cpu_to_le16(gcl_len);
+
+ if (!admin_conf->base_time) {
+ gcl_data->btl =
+ cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0));
+ gcl_data->bth =
+ cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1));
+ } else {
+ gcl_data->btl =
+ cpu_to_le32(lower_32_bits(admin_conf->base_time));
+ gcl_data->bth =
+ cpu_to_le32(upper_32_bits(admin_conf->base_time));
+ }
+
+ gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
+ gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);
+
+ for (i = 0; i < gcl_len; i++) {
+ struct tc_taprio_sched_entry *temp_entry;
+ struct gce *temp_gce = gce + i;
+
+ temp_entry = &admin_conf->entries[i];
+
+ temp_gce->gate = (u8)temp_entry->gate_mask;
+ temp_gce->period = cpu_to_le32(temp_entry->interval);
+ }
+
+ cbd.length = cpu_to_le16(data_size);
+ cbd.status_flags = 0;
+
+ dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
+ data_size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+ netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+ kfree(gcl_data);
+ return -ENOMEM;
+ }
+
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+ cbd.cls = BDCR_CMD_PORT_GCL;
+ cbd.status_flags = 0;
+
+ enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
+ tge | ENETC_QBV_TGE);
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ enetc_wr(&priv->si->hw,
+ ENETC_QBV_PTGCR_OFFSET,
+ tge & (~ENETC_QBV_TGE));
+
+ dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
+ kfree(gcl_data);
+
+ return err;
+}
+
+int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_taprio_qopt_offload *taprio = type_data;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_set_bdr_prio(&priv->si->hw,
+ priv->tx_ring[i]->index,
+ taprio->enable ? i : 0);
+
+ err = enetc_setup_taprio(ndev, taprio);
+
+ if (err)
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_set_bdr_prio(&priv->si->hw,
+ priv->tx_ring[i]->index,
+ taprio->enable ? 0 : i);
+
+ return err;
+}
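enetc_setup_taprio() above sizes its DMA buffer with struct_size(): a fixed tgs_gcl_data header followed by gcl_len gate entries. A standalone sketch of the equivalent layout arithmetic (plain C stand-ins with illustrative values; the kernel helper additionally guards against multiplication overflow):

#include <stdint.h>
#include <stdio.h>

struct gce { uint32_t period; uint8_t gate; uint8_t res[3]; };

struct tgs_gcl_data {
	uint32_t btl, bth, ct, cte;	/* base time, cycle time, extension */
	struct gce entry[];		/* flexible gate control list */
};

int main(void)
{
	unsigned int gcl_len = 4;	/* assumed list length */
	size_t sz = sizeof(struct tgs_gcl_data) +
		    gcl_len * sizeof(struct gce);

	printf("header=%zu entry=%zu total=%zu\n",
	       sizeof(struct tgs_gcl_data), sizeof(struct gce), sz);
	return 0;
}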
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 7d37ba9f6819..b886b075650e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3393,6 +3393,7 @@ fec_probe(struct platform_device *pdev)
{
struct fec_enet_private *fep;
struct fec_platform_data *pdata;
+ phy_interface_t interface;
struct net_device *ndev;
int i, irq, ret = 0;
const struct of_device_id *of_id;
@@ -3465,15 +3466,15 @@ fec_probe(struct platform_device *pdev)
}
fep->phy_node = phy_node;
- ret = of_get_phy_mode(pdev->dev.of_node);
- if (ret < 0) {
+ ret = of_get_phy_mode(pdev->dev.of_node, &interface);
+ if (ret) {
pdata = dev_get_platdata(&pdev->dev);
if (pdata)
fep->phy_interface = pdata->phy;
else
fep->phy_interface = PHY_INTERFACE_MODE_MII;
} else {
- fep->phy_interface = ret;
+ fep->phy_interface = interface;
}
fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
@@ -3644,6 +3645,8 @@ fec_drv_remove(struct platform_device *pdev)
regulator_disable(fep->reg_phy);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 7ab8095db192..f0806ace1ae2 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -608,7 +608,7 @@ static int mac_probe(struct platform_device *_of_dev)
const u8 *mac_addr;
u32 val;
u8 fman_id;
- int phy_if;
+ phy_interface_t phy_if;
dev = &_of_dev->dev;
mac_node = dev->of_node;
@@ -776,8 +776,8 @@ static int mac_probe(struct platform_device *_of_dev)
}
/* Get the PHY connection type */
- phy_if = of_get_phy_mode(mac_node);
- if (phy_if < 0) {
+ err = of_get_phy_mode(mac_node, &phy_if);
+ if (err) {
dev_warn(dev,
"of_get_phy_mode() for %pOF failed. Defaulting to SGMII\n",
mac_node);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 51ad86417cb1..72868a28b621 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -641,6 +641,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
const char *model;
const void *mac_addr;
int err = 0, i;
+ phy_interface_t interface;
struct net_device *dev = NULL;
struct gfar_private *priv = NULL;
struct device_node *np = ofdev->dev.of_node;
@@ -805,9 +806,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
* rgmii-id really needs to be specified. Other types can be
* detected by hardware
*/
- err = of_get_phy_mode(np);
- if (err >= 0)
- priv->interface = err;
+ err = of_get_phy_mode(np, &interface);
+ if (!err)
+ priv->interface = interface;
else
priv->interface = gfar_get_interface(dev);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index f472a6dbbe6f..432c6a818ae5 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -90,11 +90,11 @@ extern const char gfar_driver_version[];
#define DEFAULT_RX_LFC_THR 16
#define DEFAULT_LFC_PTVVAL 4
-/* prevent fragmenation by HW in DSA environments */
-#define GFAR_RXB_SIZE roundup(1536 + 8, 64)
-#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
- + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define GFAR_RXB_TRUESIZE 2048
+#define GFAR_SKBFRAG_OVR (RXBUF_ALIGNMENT \
+ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define GFAR_RXB_SIZE rounddown(GFAR_RXB_TRUESIZE - GFAR_SKBFRAG_OVR, 64)
+#define GFAR_SKBFRAG_SIZE (GFAR_RXB_SIZE + GFAR_SKBFRAG_OVR)
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
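The reworked gianfar macros derive the RX buffer size backwards from the fixed 2048-byte truesize, so that buffer plus alignment plus skb_shared_info overhead always fits one fragment. A worked sketch, assuming RXBUF_ALIGNMENT = 64 and a 320-byte skb_shared_info (both are config-dependent; the numbers here are illustrative only):

#include <stdio.h>

#define ROUNDDOWN(x, a)	((x) / (a) * (a))

int main(void)
{
	unsigned int truesize = 2048;		/* GFAR_RXB_TRUESIZE */
	unsigned int overhead = 64 + 320;	/* alignment + shinfo (assumed) */
	unsigned int rxb = ROUNDDOWN(truesize - overhead, 64);

	/* rxb + overhead must not exceed the 2048-byte truesize */
	printf("rxb=%u fragsize=%u (<= %u)\n", rxb, rxb + overhead, truesize);
	return 0;
}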
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 4606a7e4a6d1..3e9b6d543c77 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -211,7 +211,7 @@ struct hip04_priv {
#if defined(CONFIG_HI13X1_GMAC)
void __iomem *sysctrl_base;
#endif
- int phy_mode;
+ phy_interface_t phy_mode;
int chan;
unsigned int port;
unsigned int group;
@@ -961,10 +961,9 @@ static int hip04_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- priv->phy_mode = of_get_phy_mode(node);
- if (priv->phy_mode < 0) {
+ ret = of_get_phy_mode(node, &priv->phy_mode);
+ if (ret) {
dev_warn(d, "not find phy-mode\n");
- ret = -EINVAL;
goto init_fail;
}
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index c41b19c760f8..247de9105d10 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1193,10 +1193,9 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
if (ret)
goto err_free_mdio;
- priv->phy_mode = of_get_phy_mode(node);
- if ((int)priv->phy_mode < 0) {
+ ret = of_get_phy_mode(node, &priv->phy_mode);
+ if (ret) {
netdev_err(ndev, "not find phy-mode\n");
- ret = -EINVAL;
goto err_mdiobus;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 6d0457eb4faa..08339278c722 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -199,7 +199,6 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
ring->q = q;
ring->flags = flags;
- spin_lock_init(&ring->lock);
ring->coal_param = q->handle->coal_param;
assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index e9c67c06bfd2..6ab9458302e1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -274,9 +274,6 @@ struct hnae_ring {
/* statistic */
struct ring_stats stats;
- /* ring lock for poll one */
- spinlock_t lock;
-
dma_addr_t desc_dma_addr;
u32 buf_size; /* size for hnae_desc->addr, preset by AE */
u16 desc_num; /* total number of desc */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index a48396dd4ebb..14ab20491fd0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -943,15 +943,6 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h)
return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
-/* netif_tx_lock will turn down the performance, set only when necessary */
-#ifdef CONFIG_NET_POLL_CONTROLLER
-#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
-#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
-#else
-#define NETIF_TX_LOCK(ring)
-#define NETIF_TX_UNLOCK(ring)
-#endif
-
/* reclaim all desc in one budget
* return error or number of desc left
*/
@@ -965,21 +956,16 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
int head;
int bytes, pkts;
- NETIF_TX_LOCK(ring);
-
head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
rmb(); /* make sure head is ready before touch any data */
- if (is_ring_empty(ring) || head == ring->next_to_clean) {
- NETIF_TX_UNLOCK(ring);
+ if (is_ring_empty(ring) || head == ring->next_to_clean)
return 0; /* no data to poll */
- }
if (!is_valid_clean_head(ring, head)) {
netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean);
ring->stats.io_err_cnt++;
- NETIF_TX_UNLOCK(ring);
return -EIO;
}
@@ -994,8 +980,6 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
ring->stats.tx_pkts += pkts;
ring->stats.tx_bytes += bytes;
- NETIF_TX_UNLOCK(ring);
-
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_completed_queue(dev_queue, pkts, bytes);
@@ -1055,16 +1039,12 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
int head;
int bytes, pkts;
- NETIF_TX_LOCK(ring);
-
head = ring->next_to_use; /* ntu :soft setted ring position*/
bytes = 0;
pkts = 0;
while (head != ring->next_to_clean)
hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
- NETIF_TX_UNLOCK(ring);
-
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_reset_queue(dev_queue);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 45f59165bcd2..3b5e2d7251e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HNAE3_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 345633f9dbf6..9d47abd5c37c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HNS3_ENET_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index b104d3c3b757..6e0212b79438 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -70,11 +70,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3
-struct hns3_link_mode_mapping {
- u32 hns3_link_mode;
- u32 ethtool_link_mode;
-};
-
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index af96e7904925..d97da67f07a1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_CMD_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 49ad8483723d..d6c3952aba04 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -124,7 +124,7 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
if (ret)
return ret;
- for (i = 0; i < HNAE3_MAX_TC; i++) {
+ for (i = 0; i < hdev->tc_max; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
@@ -318,6 +318,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
u8 i, j, pfc_map, *prio_tc;
+ int ret;
if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
@@ -347,7 +348,21 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
hclge_tm_pfc_info_update(hdev);
- return hclge_pause_setup_hw(hdev, false);
+ ret = hclge_pause_setup_hw(hdev, false);
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_buffer_alloc(hdev);
+ if (ret) {
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ return ret;
+ }
+
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
/* DCBX configuration */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
index 278f21e02736..b04702e65689 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_DCB_H__
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 4f8f0684beb2..7c7038676d6d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3679,12 +3679,28 @@ static int hclge_set_rst_done(struct hclge_dev *hdev)
{
struct hclge_pf_rst_done_cmd *req;
struct hclge_desc desc;
+ int ret;
req = (struct hclge_pf_rst_done_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
- return hclge_cmd_send(&hdev->hw, &desc, 1);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ /* To be compatible with the old firmware, which does not support
+ * command HCLGE_OPC_PF_RST_DONE, just print a warning and
+ * return success
+ */
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "current firmware does not support command(0x%x)!\n",
+ HCLGE_OPC_PF_RST_DONE);
+ return 0;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
+ ret);
+ }
+
+ return ret;
}
static int hclge_reset_prepare_up(struct hclge_dev *hdev)
@@ -6350,11 +6366,23 @@ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
+
+ /* read current config parameter */
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
- false);
+ true);
req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
req->func_id = cpu_to_le32(func_id);
- req->switch_param = switch_param;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "read mac vlan switch parameter fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ /* modify and write new config parameter */
+ hclge_cmd_reuse_desc(&desc, false);
+ req->switch_param = (req->switch_param & param_mask) | switch_param;
req->param_mask = param_mask;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 599f76a05f41..88f6c4cbac7c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_MAIN_H
@@ -225,8 +225,6 @@ enum hclge_evt_cause {
HCLGE_VECTOR0_EVENT_OTHER,
};
-#define HCLGE_MPF_ENBALE 1
-
enum HCLGE_MAC_SPEED {
HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */
HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index ef095d9c566f..dd9a1218a7b0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_MDIO_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 95ef6e1204cf..45bcb67f90fd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_TM_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index ef86155de9e0..2f4c81bf4169 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -150,8 +150,6 @@ enum hclgevf_states {
HCLGEVF_STATE_CMD_DISABLE,
};
-#define HCLGEVF_MPF_ENBALE 1
-
struct hclgevf_mac {
u8 media_type;
u8 module_type;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 6e70658d50c4..db45373ea31c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -670,13 +670,10 @@ int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
static int ehea_is_hugepage(unsigned long pfn)
{
- int page_order;
-
if (pfn & EHEA_HUGEPAGE_PFN_MASK)
return 0;
- page_order = compound_order(pfn_to_page(pfn));
- if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
+ if (page_shift(pfn_to_page(pfn)) != EHEA_HUGEPAGESHIFT)
return 0;
return 1;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 9e43c9ace9c2..2e40425d8a34 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2849,6 +2849,7 @@ static int emac_init_config(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
const void *p;
+ int err;
/* Read config from device-tree */
if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
@@ -2897,8 +2898,8 @@ static int emac_init_config(struct emac_instance *dev)
dev->mal_burst_size = 256;
/* PHY mode needs some decoding */
- dev->phy_mode = of_get_phy_mode(np);
- if (dev->phy_mode < 0)
+ err = of_get_phy_mode(np, &dev->phy_mode);
+ if (err)
dev->phy_mode = PHY_INTERFACE_MODE_NA;
/* Check EMAC version */
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index e9cda024cbf6..89a1b0fea158 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -171,7 +171,7 @@ struct emac_instance {
struct mal_commac commac;
/* PHY infos */
- int phy_mode;
+ phy_interface_t phy_mode;
u32 phy_map;
u32 phy_address;
u32 phy_feat_exc;
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index b9e821de2ac6..57a25c7a9e70 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -78,7 +78,8 @@ static inline u32 zmii_mode_mask(int mode, int input)
}
}
-int zmii_attach(struct platform_device *ofdev, int input, int *mode)
+int zmii_attach(struct platform_device *ofdev, int input,
+ phy_interface_t *mode)
{
struct zmii_instance *dev = platform_get_drvdata(ofdev);
struct zmii_regs __iomem *p = dev->base;
diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h
index 41d46e9b87ba..65daedc78594 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.h
+++ b/drivers/net/ethernet/ibm/emac/zmii.h
@@ -50,7 +50,8 @@ struct zmii_instance {
int zmii_init(void);
void zmii_exit(void);
-int zmii_attach(struct platform_device *ofdev, int input, int *mode);
+int zmii_attach(struct platform_device *ofdev, int input,
+ phy_interface_t *mode);
void zmii_detach(struct platform_device *ofdev, int input);
void zmii_get_mdio(struct platform_device *ofdev, int input);
void zmii_put_mdio(struct platform_device *ofdev, int input);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c5be4ebd8437..84121aab7ff1 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1011,6 +1011,29 @@ static int ibmveth_send(struct ibmveth_adapter *adapter,
return 0;
}
+static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ethhdr *ether_header;
+ int ret = 0;
+
+ ether_header = eth_hdr(skb);
+
+ if (ether_addr_equal(ether_header->h_dest, netdev->dev_addr)) {
+ netdev_dbg(netdev, "veth doesn't support loopback packets, dropping packet.\n");
+ netdev->stats.tx_dropped++;
+ ret = -EOPNOTSUPP;
+ }
+
+ if (!ether_addr_equal(ether_header->h_source, netdev->dev_addr)) {
+ netdev_dbg(netdev, "source packet MAC address does not match veth device's, dropping packet.\n");
+ netdev->stats.tx_dropped++;
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -1022,6 +1045,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
dma_addr_t dma_addr;
unsigned long mss = 0;
+ if (ibmveth_is_packet_unsupported(skb, netdev))
+ goto out;
+
/* veth doesn't handle frag_list, so linearize the skb.
* When GRO is enabled SKB's can have frag_list.
*/
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 86493fea56e4..416da9619928 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3565,8 +3565,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- pr_info("%s changing MTU from %d to %d\n",
- netdev->name, netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 032b88619054..fe7997c18a10 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6031,7 +6031,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
usleep_range(1000, 1100);
/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
adapter->max_frame_size = max_frame;
- e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
pm_runtime_get_sync(netdev->dev.parent);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index b14441944b4b..f306084ca12c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -534,6 +534,7 @@ void fm10k_iov_suspend(struct pci_dev *pdev);
int fm10k_iov_resume(struct pci_dev *pdev);
void fm10k_iov_disable(struct pci_dev *pdev);
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs);
+void fm10k_iov_update_stats(struct fm10k_intfc *interface);
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid);
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
@@ -542,6 +543,8 @@ int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
int __always_unused min_rate, int max_rate);
int fm10k_ndo_get_vf_config(struct net_device *netdev,
int vf_idx, struct ifla_vf_info *ivi);
+int fm10k_ndo_get_vf_stats(struct net_device *netdev,
+ int vf_idx, struct ifla_vf_stats *stats);
/* DebugFS */
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index afe1fafd2447..8c50a128df29 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -520,6 +520,27 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
return num_vfs;
}
+/**
+ * fm10k_iov_update_stats - Update stats for all VFs
+ * @interface: device private structure
+ *
+ * Updates the VF statistics for all enabled VFs. Expects to be called by
+ * fm10k_update_stats and assumes that locking via the __FM10K_UPDATING_STATS
+ * bit is already handled.
+ */
+void fm10k_iov_update_stats(struct fm10k_intfc *interface)
+{
+ struct fm10k_iov_data *iov_data = interface->iov_data;
+ struct fm10k_hw *hw = &interface->hw;
+ int i;
+
+ if (!iov_data)
+ return;
+
+ for (i = 0; i < iov_data->num_vfs; i++)
+ hw->iov.ops.update_stats(hw, iov_data->vf_info[i].stats, i);
+}
+
static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
struct fm10k_vf_info *vf_info)
{
@@ -650,3 +671,30 @@ int fm10k_ndo_get_vf_config(struct net_device *netdev,
return 0;
}
+
+int fm10k_ndo_get_vf_stats(struct net_device *netdev,
+ int vf_idx, struct ifla_vf_stats *stats)
+{
+ struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct fm10k_iov_data *iov_data = interface->iov_data;
+ struct fm10k_hw *hw = &interface->hw;
+ struct fm10k_hw_stats_q *hw_stats;
+ u32 idx, qpp;
+
+ /* verify SR-IOV is active and that vf idx is valid */
+ if (!iov_data || vf_idx >= iov_data->num_vfs)
+ return -EINVAL;
+
+ qpp = fm10k_queues_per_pool(hw);
+ hw_stats = iov_data->vf_info[vf_idx].stats;
+
+ for (idx = 0; idx < qpp; idx++) {
+ stats->rx_packets += hw_stats[idx].rx_packets.count;
+ stats->tx_packets += hw_stats[idx].tx_packets.count;
+ stats->rx_bytes += hw_stats[idx].rx_bytes.count;
+ stats->tx_bytes += hw_stats[idx].tx_bytes.count;
+ stats->rx_dropped += hw_stats[idx].rx_drops.count;
+ }
+
+ return 0;
+}
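
As a side note on the pattern above: the networking core zeroes the struct ifla_vf_stats before invoking .ndo_get_vf_stats, which appears to be why fm10k can accumulate straight into it without a memset. A minimal sketch of the same roll-up, with a hypothetical per-queue counter struct standing in for fm10k_hw_stats_q:

#include <linux/types.h>
#include <linux/if_link.h>	/* struct ifla_vf_stats */

/* Hypothetical per-queue counters mirroring the fields read above. */
struct demo_q_stats {
	u64 rx_packets, tx_packets;
	u64 rx_bytes, tx_bytes;
	u64 rx_drops;
};

/* Sum per-queue counters into the netlink-visible VF stats. The
 * caller is assumed to have zeroed *stats, as rtnetlink does.
 */
static void demo_fill_vf_stats(struct ifla_vf_stats *stats,
			       const struct demo_q_stats *q, u32 nq)
{
	u32 i;

	for (i = 0; i < nq; i++) {
		stats->rx_packets += q[i].rx_packets;
		stats->tx_packets += q[i].tx_packets;
		stats->rx_bytes += q[i].rx_bytes;
		stats->tx_bytes += q[i].tx_bytes;
		stats->rx_dropped += q[i].rx_drops;
	}
}
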
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 2be9222510e7..17738b0a9873 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -11,7 +11,7 @@
#include "fm10k.h"
-#define DRV_VERSION "0.26.1-k"
+#define DRV_VERSION "0.27.1-k"
#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 09f7a246e134..68baee04dc58 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1643,6 +1643,7 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
+ .ndo_get_vf_stats = fm10k_ndo_get_vf_stats,
.ndo_udp_tunnel_add = fm10k_udp_tunnel_add,
.ndo_udp_tunnel_del = fm10k_udp_tunnel_del,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index bb236fa44048..d122d0087191 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -630,6 +630,9 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
net_stats->rx_errors = rx_errors;
net_stats->rx_dropped = interface->stats.nodesc_drop.count;
+ /* Update VF statistics */
+ fm10k_iov_update_stats(interface);
+
clear_bit(__FM10K_UPDATING_STATS, interface->state);
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
index 160bc5b78f99..ceb9b791f799 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#ifndef _FM10K_TLV_H_
#define _FM10K_TLV_H_
@@ -76,8 +76,8 @@ struct fm10k_tlv_attr {
#define FM10K_TLV_ATTR_S32(id) { id, FM10K_TLV_SIGNED, 4 }
#define FM10K_TLV_ATTR_S64(id) { id, FM10K_TLV_SIGNED, 8 }
#define FM10K_TLV_ATTR_LE_STRUCT(id, len) { id, FM10K_TLV_LE_STRUCT, len }
-#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED }
-#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR }
+#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED, 0 }
+#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR, 0, 0 }
struct fm10k_msg_data {
unsigned int id;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 15ac1c7885bc..63968c5d7c5d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -581,6 +581,7 @@ struct fm10k_vf_info {
* at the same offset as the mailbox
*/
struct fm10k_mbx_info mbx; /* PF side of VF mailbox */
+ struct fm10k_hw_stats_q stats[FM10K_MAX_QUEUES_POOL];
int rate; /* Tx BW cap as defined by OS */
u16 glort; /* resource tag for this VF */
u16 sw_vid; /* Switch API assigned VLAN */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index a23f89fb33ee..aa5f1c0aa721 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -20,6 +20,8 @@
/* API version 1.7 implements additional link and PHY-specific APIs */
#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.9 for X722 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 8b25a6d9c81d..d4055037af89 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -29,6 +29,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
case I40E_DEV_ID_10G_B:
case I40E_DEV_ID_10G_SFP:
case I40E_DEV_ID_20G_KR2:
@@ -1895,7 +1896,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
+ hw->mac.type != I40E_MAC_X722) {
__le32 tmp;
memcpy(&tmp, resp->link_type, sizeof(tmp));
@@ -4910,6 +4912,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
break;
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b3d7edbb1389..1ccabeafa44c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2664,8 +2664,8 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- netdev_info(netdev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
@@ -12870,6 +12870,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
+ .ndo_get_vf_stats = i40e_get_vf_stats,
.ndo_set_vf_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index a2710664d653..6a3f0fc56c3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -4524,3 +4524,51 @@ out:
clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
+
+/**
+ * i40e_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-127)
+ * @vf_stats: pointer to the OS memory to be initialized
+ */
+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_eth_stats *stats;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+
+ /* validate the request */
+ if (i40e_validate_vf(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi)
+ return -EINVAL;
+
+ i40e_update_eth_stats(vsi);
+ stats = &vsi->eth_stats;
+
+ memset(vf_stats, 0, sizeof(*vf_stats));
+
+ vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+ stats->rx_multicast;
+ vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+ stats->tx_multicast;
+ vf_stats->rx_bytes = stats->rx_bytes;
+ vf_stats->tx_bytes = stats->tx_bytes;
+ vf_stats->broadcast = stats->rx_broadcast;
+ vf_stats->multicast = stats->rx_multicast;
+ vf_stats->rx_dropped = stats->rx_discards;
+ vf_stats->tx_dropped = stats->tx_discards;
+
+ return 0;
+}
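
One clarifying note on the mapping above, since i40e_eth_stats and ifla_vf_stats slice the counters differently:

/* i40e_eth_stats keeps unicast/multicast/broadcast counters
 * separately, while struct ifla_vf_stats wants one packet total plus
 * broadcast and multicast breakouts, hence:
 *
 *   rx_packets = rx_unicast + rx_broadcast + rx_multicast
 *   tx_packets = tx_unicast + tx_broadcast + tx_multicast
 */
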
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 1ce06240a702..631248c0981a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -138,5 +138,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats);
#endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index a05dfecdd9b4..d07e1a890428 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -689,8 +689,6 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
i40e_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
- xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return !!budget && work_done;
@@ -769,12 +767,8 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
out_xmit:
- if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) {
- if (tx_ring->next_to_clean == tx_ring->next_to_use)
- xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
- else
- xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
- }
+ if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
+ xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
xmit_done = i40e_xmit_zc(tx_ring, budget);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 8f310e520b06..821987da5698 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -314,7 +314,7 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
q_vector->ring_mask |= BIT(r_idx);
wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
- q_vector->rx.current_itr);
+ q_vector->rx.current_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;
}
@@ -340,7 +340,7 @@ iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
q_vector->num_ringpairs++;
wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
- q_vector->tx.target_itr);
+ q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
}
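
The new `>> 1` in both register writes is consistent with the ITR registers counting the interval in 2-microsecond units while the driver tracks ITR in microseconds (the ice hunks later in this series make the same granularity explicit via ICE_ITR_GRAN_S). A hedged sketch of the conversion, with hypothetical names:

/* Assumption: hardware ITR granularity is 2 usecs; driver ITR values
 * are kept in usecs, so the register value is the interval halved.
 */
#define DEMO_ITR_GRAN_US	2

static inline u32 demo_itr_usecs_to_reg(u32 itr_usecs)
{
	return itr_usecs / DEMO_ITR_GRAN_US;	/* same as itr_usecs >> 1 */
}
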
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 9edde960b4f2..7cb829132d28 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -13,9 +13,12 @@ ice-y := ice_main.o \
ice_nvm.o \
ice_switch.o \
ice_sched.o \
+ ice_base.o \
ice_lib.o \
+ ice_txrx_lib.o \
ice_txrx.o \
ice_flex_pipe.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
-ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o
+ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
+ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 45e100666049..8d7e8fc55585 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -29,10 +29,13 @@
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
+#include <linux/pkt_sched.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
+#include <linux/bpf.h>
#include <linux/avf/virtchnl.h>
#include <net/ipv6.h>
+#include <net/xdp_sock.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -42,6 +45,7 @@
#include "ice_sched.h"
#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
+#include "ice_xsk.h"
extern const char ice_drv_ver[];
#define ICE_BAR0 0
@@ -78,8 +82,7 @@ extern const char ice_drv_ver[];
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
-#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \
- (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)))
+#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)
#define ICE_UP_TABLE_TRANSLATE(val, i) \
(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
@@ -127,6 +130,14 @@ extern const char ice_drv_ver[];
ICE_PROMISC_VLAN_TX | \
ICE_PROMISC_VLAN_RX)
+struct ice_txq_meta {
+ u32 q_teid; /* Tx-scheduler element identifier */
+ u16 q_id; /* Entry in VSI's txq_map bitmap */
+ u16 q_handle; /* Relative index of Tx queue within TC */
+ u16 vsi_idx; /* VSI index that Tx queue belongs to */
+ u8 tc; /* TC number that Tx queue belongs to */
+};
+
struct ice_tc_info {
u16 qoffset;
u16 qcount_tx;
@@ -169,6 +180,7 @@ enum ice_state {
__ICE_NEEDS_RESTART,
__ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
__ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */
+ __ICE_DCBNL_DEVRESET, /* set by dcbnl devreset */
__ICE_PFR_REQ, /* set by driver and peers */
__ICE_CORER_REQ, /* set by driver and peers */
__ICE_GLOBR_REQ, /* set by driver and peers */
@@ -274,6 +286,13 @@ struct ice_vsi {
u16 num_rx_desc;
u16 num_tx_desc;
struct ice_tc_cfg tc_cfg;
+ struct bpf_prog *xdp_prog;
+ struct ice_ring **xdp_rings; /* XDP ring array */
+ u16 num_xdp_txq; /* Used XDP queues */
+ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ struct xdp_umem **xsk_umems;
+ u16 num_xsk_umems_used;
+ u16 num_xsk_umems;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
@@ -313,6 +332,7 @@ enum ice_pf_flags {
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
+ ICE_FLAG_LEGACY_RX,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -346,6 +366,7 @@ struct ice_pf {
struct work_struct serv_task;
struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
+ struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
u32 hw_csum_rx_error;
u32 oicr_idx; /* Other interrupt cause MSIX vector index */
@@ -417,6 +438,37 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
return np->vsi->back;
}
+static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
+{
+ return !!vsi->xdp_prog;
+}
+
+static inline void ice_set_ring_xdp(struct ice_ring *ring)
+{
+ ring->flags |= ICE_TX_FLAGS_RING_XDP;
+}
+
+/**
+ * ice_xsk_umem - get XDP UMEM bound to a ring
+ * @ring: ring to use
+ *
+ * Returns a pointer to the xdp_umem structure if a UMEM is present,
+ * NULL otherwise.
+ */
+static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
+{
+ struct xdp_umem **umems = ring->vsi->xsk_umems;
+ int qid = ring->q_index;
+
+ if (ice_ring_is_xdp(ring))
+ qid -= ring->vsi->num_xdp_txq;
+
+ if (!umems || !umems[qid] || !ice_is_xdp_ena_vsi(ring->vsi))
+ return NULL;
+
+ return umems[qid];
+}
+
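
A worked example of the qid adjustment above, with assumed sizes: on a VSI with 8 data queue pairs and num_xdp_txq = 8, the XDP Tx ring at q_index 10 services queue pair 2, so its UMEM lives at xsk_umems[2]:

/* Index math from ice_xsk_umem(), isolated with invented numbers. */
static int demo_umem_qid(int q_index, bool is_xdp_ring, int num_xdp_txq)
{
	int qid = q_index;

	if (is_xdp_ring)
		qid -= num_xdp_txq;	/* XDP rings sit after the data rings */

	return qid;	/* demo_umem_qid(10, true, 8) == 2 */
}
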
/**
* ice_get_main_vsi - Get the PF VSI
* @pf: PF instance
@@ -443,14 +495,15 @@ int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int
+ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
-#ifdef CONFIG_DCB
-int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
-void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
-#endif /* CONFIG_DCB */
int ice_open(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 023e3d2fee5f..5421fc413f94 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -742,6 +742,10 @@ struct ice_aqc_add_elem {
struct ice_aqc_txsched_elem_data generic[1];
};
+struct ice_aqc_conf_elem {
+ struct ice_aqc_txsched_elem_data generic[1];
+};
+
struct ice_aqc_get_elem {
struct ice_aqc_txsched_elem_data generic[1];
};
@@ -783,6 +787,44 @@ struct ice_aqc_port_ets_elem {
__le32 tc_node_teid[8]; /* Used for response, reserved in command */
};
+/* Rate limiting profile for
+ * Add RL profile (indirect 0x0410)
+ * Query RL profile (indirect 0x0411)
+ * Remove RL profile (indirect 0x0415)
+ * These indirect commands act on single or multiple
+ * RL profiles with the specified data.
+ */
+struct ice_aqc_rl_profile {
+ __le16 num_profiles;
+ __le16 num_processed; /* Only for response. Reserved in Command. */
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_rl_profile_elem {
+ u8 level;
+ u8 flags;
+#define ICE_AQC_RL_PROFILE_TYPE_S 0x0
+#define ICE_AQC_RL_PROFILE_TYPE_M (0x3 << ICE_AQC_RL_PROFILE_TYPE_S)
+#define ICE_AQC_RL_PROFILE_TYPE_CIR 0
+#define ICE_AQC_RL_PROFILE_TYPE_EIR 1
+#define ICE_AQC_RL_PROFILE_TYPE_SRL 2
+/* The following flag is used for Query RL Profile Data */
+#define ICE_AQC_RL_PROFILE_INVAL_S 0x7
+#define ICE_AQC_RL_PROFILE_INVAL_M (0x1 << ICE_AQC_RL_PROFILE_INVAL_S)
+
+ __le16 profile_id;
+ __le16 max_burst_size;
+ __le16 rl_multiply;
+ __le16 wake_up_calc;
+ __le16 rl_encode;
+};
+
+struct ice_aqc_rl_profile_generic_elem {
+ struct ice_aqc_rl_profile_elem generic[1];
+};
+
/* Query Scheduler Resource Allocation (indirect 0x0412)
* This indirect command retrieves the scheduler resources allocated by
* EMP Firmware to the given PF.
@@ -1044,6 +1086,10 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_TOPO_CONFLICT BIT(0)
#define ICE_AQ_LINK_MEDIA_CONFLICT BIT(1)
#define ICE_AQ_LINK_TOPO_CORRUPT BIT(2)
+#define ICE_AQ_LINK_TOPO_UNREACH_PRT BIT(4)
+#define ICE_AQ_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7)
u8 reserved1;
u8 link_info;
#define ICE_AQ_LINK_UP BIT(0) /* Link Status */
@@ -1147,6 +1193,33 @@ struct ice_aqc_set_port_id_led {
u8 rsvd[13];
};
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ice_aqc_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define ICE_AQC_SFF_I2CBUS_7BIT_M 0x7F
+#define ICE_AQC_SFF_I2CBUS_10BIT_M 0x3FF
+#define ICE_AQC_SFF_I2CBUS_TYPE_M BIT(10)
+#define ICE_AQC_SFF_I2CBUS_TYPE_7BIT 0
+#define ICE_AQC_SFF_I2CBUS_TYPE_10BIT ICE_AQC_SFF_I2CBUS_TYPE_M
+#define ICE_AQC_SFF_SET_EEPROM_PAGE_S 11
+#define ICE_AQC_SFF_SET_EEPROM_PAGE_M (0x3 << ICE_AQC_SFF_SET_EEPROM_PAGE_S)
+#define ICE_AQC_SFF_NO_PAGE_CHANGE 0
+#define ICE_AQC_SFF_SET_23_ON_MISMATCH 1
+#define ICE_AQC_SFF_SET_22_ON_MISMATCH 2
+#define ICE_AQC_SFF_IS_WRITE BIT(15)
+ __le16 i2c_mem_addr;
+ __le16 eeprom_page;
+#define ICE_AQC_SFF_EEPROM_BANK_S 0
+#define ICE_AQC_SFF_EEPROM_BANK_M (0xFF << ICE_AQC_SFF_EEPROM_BANK_S)
+#define ICE_AQC_SFF_EEPROM_PAGE_S 8
+#define ICE_AQC_SFF_EEPROM_PAGE_M (0xFF << ICE_AQC_SFF_EEPROM_PAGE_S)
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
@@ -1618,6 +1691,7 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
struct ice_aqc_get_sw_cfg get_sw_conf;
struct ice_aqc_sw_rules sw_rules;
@@ -1625,6 +1699,7 @@ struct ice_aq_desc {
struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
struct ice_aqc_query_port_ets port_ets;
+ struct ice_aqc_rl_profile rl_profile;
struct ice_aqc_nvm nvm;
struct ice_aqc_nvm_checksum nvm_checksum;
struct ice_aqc_pf_vf_msg virt;
@@ -1726,12 +1801,15 @@ enum ice_adminq_opc {
/* transmit scheduler commands */
ice_aqc_opc_get_dflt_topo = 0x0400,
ice_aqc_opc_add_sched_elems = 0x0401,
+ ice_aqc_opc_cfg_sched_elems = 0x0403,
ice_aqc_opc_get_sched_elems = 0x0404,
ice_aqc_opc_suspend_sched_elems = 0x0409,
ice_aqc_opc_resume_sched_elems = 0x040A,
ice_aqc_opc_query_port_ets = 0x040E,
ice_aqc_opc_delete_sched_elems = 0x040F,
+ ice_aqc_opc_add_rl_profiles = 0x0410,
ice_aqc_opc_query_sched_res = 0x0412,
+ ice_aqc_opc_remove_rl_profiles = 0x0415,
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
@@ -1741,6 +1819,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_event_mask = 0x0613,
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_set_port_id_led = 0x06E9,
+ ice_aqc_opc_sff_eeprom = 0x06EE,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
new file mode 100644
index 000000000000..69d2da14fe5c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -0,0 +1,857 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_base.h"
+#include "ice_dcb_lib.h"
+
+/**
+ * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ *
+ * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
+ */
+static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
+{
+ int offset, i;
+
+ mutex_lock(qs_cfg->qs_mutex);
+ offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
+ 0, qs_cfg->q_count, 0);
+ if (offset >= qs_cfg->pf_map_size) {
+ mutex_unlock(qs_cfg->qs_mutex);
+ return -ENOMEM;
+ }
+
+ bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
+ for (i = 0; i < qs_cfg->q_count; i++)
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
+ mutex_unlock(qs_cfg->qs_mutex);
+
+ return 0;
+}
+
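The contiguous path is a direct use of the generic bitmap API: bitmap_find_next_zero_area() returns the start of a free run, or a value >= the bitmap size when no run exists, and bitmap_set() then claims it. A standalone sketch of the same pattern, minus the locking and the VSI map fill-in:

#include <linux/bitmap.h>
#include <linux/errno.h>

/* Claim n contiguous queues from a PF-wide bitmap; returns the first
 * claimed index or -ENOMEM.
 */
static int demo_claim_contig(unsigned long *map, unsigned long size,
			     unsigned int n)
{
	unsigned long off;

	off = bitmap_find_next_zero_area(map, size, 0, n, 0);
	if (off >= size)
		return -ENOMEM;

	bitmap_set(map, off, n);
	return off;
}
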
+/**
+ * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ *
+ * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
+ */
+static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
+{
+ int i, index = 0;
+
+ mutex_lock(qs_cfg->qs_mutex);
+ for (i = 0; i < qs_cfg->q_count; i++) {
+ index = find_next_zero_bit(qs_cfg->pf_map,
+ qs_cfg->pf_map_size, index);
+ if (index >= qs_cfg->pf_map_size)
+ goto err_scatter;
+ set_bit(index, qs_cfg->pf_map);
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
+ }
+ mutex_unlock(qs_cfg->qs_mutex);
+
+ return 0;
+err_scatter:
+ for (index = 0; index < i; index++) {
+ clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
+ qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
+ }
+ mutex_unlock(qs_cfg->qs_mutex);
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @ena: enable or disable state of the queue
+ *
+ * This routine will wait for the given Rx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT if the queue fails to reach the requested state after
+ * multiple retries; otherwise returns 0 on success.
+ */
+static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
+{
+ int i;
+
+ for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
+ if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
+ QRX_CTRL_QENA_STAT_M))
+ return 0;
+
+ usleep_range(20, 40);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the VSI struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ */
+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_q_vector *q_vector;
+
+ /* allocate q_vector */
+ q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ q_vector->vsi = vsi;
+ q_vector->v_idx = v_idx;
+ if (vsi->type == ICE_VSI_VF)
+ goto out;
+ /* only set affinity_mask if the CPU is online */
+ if (cpu_online(v_idx))
+ cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+
+ /* This will not be called in the driver load path because the netdev
+ * will not be created yet. All other cases will register the NAPI
+ * handler here (i.e. resume, reset/rebuild, etc.)
+ */
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
+ NAPI_POLL_WEIGHT);
+
+out:
+ /* tie q_vector and VSI together */
+ vsi->q_vectors[v_idx] = q_vector;
+
+ return 0;
+}
+
+/**
+ * ice_free_q_vector - Free memory allocated for a specific interrupt vector
+ * @vsi: VSI having the memory freed
+ * @v_idx: index of the vector to be freed
+ */
+static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_pf *pf = vsi->back;
+ struct ice_ring *ring;
+
+ if (!vsi->q_vectors[v_idx]) {
+ dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
+ v_idx);
+ return;
+ }
+ q_vector = vsi->q_vectors[v_idx];
+
+ ice_for_each_ring(ring, q_vector->tx)
+ ring->q_vector = NULL;
+ ice_for_each_ring(ring, q_vector->rx)
+ ring->q_vector = NULL;
+
+ /* only VSI with an associated netdev is set up with NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+
+ devm_kfree(&pf->pdev->dev, q_vector);
+ vsi->q_vectors[v_idx] = NULL;
+}
+
+/**
+ * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
+ * @hw: board specific structure
+ */
+static void ice_cfg_itr_gran(struct ice_hw *hw)
+{
+ u32 regval = rd32(hw, GLINT_CTL);
+
+ /* no need to update global register if ITR gran is already set */
+ if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
+ (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
+ GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
+ GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
+ GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
+ GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
+ return;
+
+ regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
+ GLINT_CTL_ITR_GRAN_200_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
+ GLINT_CTL_ITR_GRAN_100_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
+ GLINT_CTL_ITR_GRAN_50_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
+ GLINT_CTL_ITR_GRAN_25_M);
+ wr32(hw, GLINT_CTL, regval);
+}
+
+/**
+ * ice_calc_q_handle - calculate the queue handle
+ * @vsi: VSI that ring belongs to
+ * @ring: ring to get the absolute queue index
+ * @tc: traffic class number
+ */
+static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
+{
+ WARN_ONCE(ice_ring_is_xdp(ring) && tc,
+ "XDP ring can't belong to TC other than 0");
+
+ /* The handle is computed by subtracting the queue offset of the TC
+ * that the ring belongs to from the ring's absolute queue index,
+ * which yields the queue's index within that TC.
+ */
+ return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
+}
+
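A worked example of the subtraction: if TC 1 owns absolute queues 8-15 (qoffset = 8) and the ring's q_index is 10, its handle within the TC is 2:

/* Numbers invented for illustration. */
static u16 demo_q_handle(u16 q_index, u16 tc_qoffset)
{
	return q_index - tc_qoffset;	/* demo_q_handle(10, 8) == 2 */
}
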
+/**
+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
+ * @ring: The Tx ring to configure
+ * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
+ * @pf_q: queue index in the PF space
+ *
+ * Configure the Tx descriptor ring in TLAN context.
+ */
+static void
+ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+
+ tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
+
+ tlan_ctx->port_num = vsi->port_info->lport;
+
+ /* Transmit Queue Length */
+ tlan_ctx->qlen = ring->count;
+
+ ice_set_cgd_num(tlan_ctx, ring);
+
+ /* PF number */
+ tlan_ctx->pf_num = hw->pf_id;
+
+ /* queue belongs to a specific VSI type
+ * VF / VM index should be programmed per vmvf_type setting:
+ * for vmvf_type = VF, it is VF number between 0-256
+ * for vmvf_type = VM, it is VM number between 0-767
+ * for PF or EMP this field should be set to zero
+ */
+ switch (vsi->type) {
+ case ICE_VSI_LB:
+ /* fall through */
+ case ICE_VSI_PF:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ break;
+ case ICE_VSI_VF:
+ /* Firmware expects vmvf_num to be absolute VF ID */
+ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+ break;
+ default:
+ return;
+ }
+
+ /* make sure the context is associated with the right VSI */
+ tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+
+ tlan_ctx->tso_ena = ICE_TX_LEGACY;
+ tlan_ctx->tso_qnum = pf_q;
+
+ /* Legacy or Advanced Host Interface:
+ * 0: Advanced Host Interface
+ * 1: Legacy Host Interface
+ */
+ tlan_ctx->legacy_int = ICE_TX_LEGACY;
+}
+
+/**
+ * ice_setup_rx_ctx - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in RLAN context.
+ */
+int ice_setup_rx_ctx(struct ice_ring *ring)
+{
+ int chain_len = ICE_MAX_CHAINED_RX_BUFS;
+ struct ice_vsi *vsi = ring->vsi;
+ u32 rxdid = ICE_RXDID_FLEX_NIC;
+ struct ice_rlan_ctx rlan_ctx;
+ struct ice_hw *hw;
+ u32 regval;
+ u16 pf_q;
+ int err;
+
+ hw = &vsi->back->hw;
+
+ /* find this ring's Rx queue number in the global space of 2K Rx queues */
+ pf_q = vsi->rxq_map[ring->q_index];
+
+ /* clear the context structure first */
+ memset(&rlan_ctx, 0, sizeof(rlan_ctx));
+
+ ring->rx_buf_len = vsi->rx_buf_len;
+
+ if (ring->vsi->type == ICE_VSI_PF) {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index);
+
+ ring->xsk_umem = ice_xsk_umem(ring);
+ if (ring->xsk_umem) {
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+
+ ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
+ XDP_PACKET_HEADROOM;
+ /* For AF_XDP ZC, we disallow packets that span
+ * multiple buffers, thus letting us skip that
+ * handling in the fast-path.
+ */
+ chain_len = 1;
+ ring->zca.free = ice_zca_free;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_ZERO_COPY,
+ &ring->zca);
+ if (err)
+ return err;
+
+ dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+ ring->q_index);
+ } else {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ xdp_rxq_info_reg(&ring->xdp_rxq,
+ ring->netdev,
+ ring->q_index);
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+ return err;
+ }
+ }
+ /* Receive Queue Base Address.
+ * Indicates the starting address of the descriptor queue defined in
+ * 128 Byte units.
+ */
+ rlan_ctx.base = ring->dma >> 7;
+
+ rlan_ctx.qlen = ring->count;
+
+ /* Receive Packet Data Buffer Size.
+ * The Packet Data Buffer Size is defined in 128 byte units.
+ */
+ rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+
+ /* use 32 byte descriptors */
+ rlan_ctx.dsize = 1;
+
+ /* Strip the Ethernet CRC bytes before the packet is posted to host
+ * memory.
+ */
+ rlan_ctx.crcstrip = 1;
+
+ /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
+ rlan_ctx.l2tsel = 1;
+
+ rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
+ rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+ rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
+
+ /* This controls whether VLAN is stripped from inner headers
+ * The VLAN in the inner L2 header is stripped to the receive
+ * descriptor if enabled by this flag.
+ */
+ rlan_ctx.showiv = 0;
+
+ /* Max packet size for this queue - must not be set to a larger value
+ * than 5 x DBUF
+ */
+ rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
+ chain_len * ring->rx_buf_len);
+
+ /* Rx queue threshold in units of 64 */
+ rlan_ctx.lrxqthresh = 1;
+
+ /* Enable Flexible Descriptors in the queue context which
+ * allows this driver to select a specific receive descriptor format
+ */
+ if (vsi->type != ICE_VSI_VF) {
+ regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+ regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+ /* increasing context priority to pick up profile ID;
+ * default is 0x01; setting to 0x03 to ensure the profile
+ * is programmed if the previous context has the same priority
+ */
+ regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ }
+
+ /* Absolute queue number out of 2K needs to be passed */
+ err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
+ if (err) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ pf_q, err);
+ return -EIO;
+ }
+
+ if (vsi->type == ICE_VSI_VF)
+ return 0;
+
+ /* configure Rx buffer alignment */
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+ ice_clear_ring_build_skb_ena(ring);
+ else
+ ice_set_ring_build_skb_ena(ring);
+
+ /* init queue specific tail register */
+ ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
+ writel(0, ring->tail);
+
+ err = ring->xsk_umem ?
+ ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
+ ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
+ if (err)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
+ ring->xsk_umem ? "UMEM enabled " : "",
+ ring->q_index, pf_q);
+
+ return 0;
+}
+
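Two unit conversions in ice_setup_rx_ctx() above are easy to misread: the descriptor base and the buffer size are both programmed in 128-byte units (the >> 7 and the ICE_RLAN_CTX_DBUF_S shift), and rxmax is clamped so no frame needs more than chain_len buffers. Worked with assumed values:

/* Assumed values, for illustration only. */
#define DEMO_RX_BUF_LEN		2048U	/* bytes */
#define DEMO_CHAIN_LEN		5U	/* ICE_MAX_CHAINED_RX_BUFS */
#define DEMO_MAX_FRAME		9728U	/* jumbo frame */

/* dbuf is in 128-byte units: 2048 >> 7 == 16 */
static const unsigned int demo_dbuf = DEMO_RX_BUF_LEN >> 7;

/* rxmax = min(9728, 5 * 2048 = 10240) = 9728 */
static const unsigned int demo_rxmax =
	DEMO_MAX_FRAME < DEMO_CHAIN_LEN * DEMO_RX_BUF_LEN ?
	DEMO_MAX_FRAME : DEMO_CHAIN_LEN * DEMO_RX_BUF_LEN;
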
+/**
+ * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ *
+ * This function first tries to find contiguous space. If it is not successful,
+ * it tries with the scatter approach.
+ *
+ * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
+ */
+int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
+{
+ int ret = 0;
+
+ ret = __ice_vsi_get_qs_contig(qs_cfg);
+ if (ret) {
+ /* contig failed, so try with scatter approach */
+ qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
+ qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
+ qs_cfg->scatter_count);
+ ret = __ice_vsi_get_qs_sc(qs_cfg);
+ }
+ return ret;
+}
+
+/**
+ * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
+ * @vsi: the VSI being configured
+ * @ena: start or stop the Rx rings
+ * @rxq_idx: Rx queue index
+ */
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
+{
+ int pf_q = vsi->rxq_map[rxq_idx];
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int ret = 0;
+ u32 rx_reg;
+
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
+
+ /* Skip if the queue is already in the requested state */
+ if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+ return 0;
+
+ /* turn on/off the queue */
+ if (ena)
+ rx_reg |= QRX_CTRL_QENA_REQ_M;
+ else
+ rx_reg &= ~QRX_CTRL_QENA_REQ_M;
+ wr32(hw, QRX_CTRL(pf_q), rx_reg);
+
+ /* wait for the change to finish */
+ ret = ice_pf_rxq_wait(pf, pf_q, ena);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "VSI idx %d Rx ring %d %sable timeout\n",
+ vsi->idx, pf_q, (ena ? "en" : "dis"));
+
+ return ret;
+}
+
+/**
+ * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int v_idx = 0, num_q_vectors;
+ int err;
+
+ if (vsi->q_vectors[0]) {
+ dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
+ vsi->vsi_num);
+ return -EEXIST;
+ }
+
+ num_q_vectors = vsi->num_q_vectors;
+
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ err = ice_vsi_alloc_q_vector(vsi, v_idx);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ while (v_idx--)
+ ice_free_q_vector(vsi, v_idx);
+
+ dev_err(&pf->pdev->dev,
+ "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
+ vsi->num_q_vectors, vsi->vsi_num, err);
+ vsi->num_q_vectors = 0;
+ return err;
+}
+
+/**
+ * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * This function maps descriptor rings to the queue-specific vectors allotted
+ * through the MSI-X enabling code. On a constrained vector budget, we map Tx
+ * and Rx rings to the vector as "efficiently" as possible.
+ */
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+{
+ int q_vectors = vsi->num_q_vectors;
+ int tx_rings_rem, rx_rings_rem;
+ int v_id;
+
+ /* initially assign the remaining ring counts to the VSI's queue counts */
+ tx_rings_rem = vsi->num_txq;
+ rx_rings_rem = vsi->num_rxq;
+
+ for (v_id = 0; v_id < q_vectors; v_id++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
+ int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
+
+ /* Tx rings mapping to vector */
+ tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
+ q_vector->num_ring_tx = tx_rings_per_v;
+ q_vector->tx.ring = NULL;
+ q_vector->tx.itr_idx = ICE_TX_ITR;
+ q_base = vsi->num_txq - tx_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
+ struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
+ }
+ tx_rings_rem -= tx_rings_per_v;
+
+ /* Rx rings mapping to vector */
+ rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
+ q_vector->num_ring_rx = rx_rings_per_v;
+ q_vector->rx.ring = NULL;
+ q_vector->rx.itr_idx = ICE_RX_ITR;
+ q_base = vsi->num_rxq - rx_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
+ struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ }
+ rx_rings_rem -= rx_rings_per_v;
+ }
+}
+
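The DIV_ROUND_UP over the remaining count spreads rings as evenly as possible when the queue count doesn't divide by the vector count; 8 rings over 3 vectors come out 3, 3, 2. A small userspace demo of the same loop:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Prints "vector 0: 3", "vector 1: 3", "vector 2: 2". */
int main(void)
{
	int rings_rem = 8, q_vectors = 3, v_id;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		int per_v = DIV_ROUND_UP(rings_rem, q_vectors - v_id);

		printf("vector %d: %d\n", v_id, per_v);
		rings_rem -= per_v;
	}
	return 0;
}
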
+/**
+ * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI having memory freed
+ */
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ ice_for_each_q_vector(vsi, v_idx)
+ ice_free_q_vector(vsi, v_idx);
+}
+
+/**
+ * ice_vsi_cfg_txq - Configure single Tx queue
+ * @vsi: the VSI that queue belongs to
+ * @ring: Tx ring to be configured
+ * @qg_buf: queue group buffer
+ */
+int
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_aqc_add_tx_qgrp *qg_buf)
+{
+ struct ice_tlan_ctx tlan_ctx = { 0 };
+ struct ice_aqc_add_txqs_perq *txq;
+ struct ice_pf *pf = vsi->back;
+ u8 buf_len = sizeof(*qg_buf);
+ enum ice_status status;
+ u16 pf_q;
+ u8 tc;
+
+ pf_q = ring->reg_idx;
+ ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ /* copy context contents into the qg_buf */
+ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+ ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+ /* init queue specific tail reg. It is referred to as the
+ * transmit comm scheduler queue doorbell.
+ */
+ ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+
+ if (IS_ENABLED(CONFIG_DCB))
+ tc = ring->dcb_tc;
+ else
+ tc = 0;
+
+ /* Add unique software queue handle of the Tx queue per
+ * TC into the VSI Tx ring
+ */
+ ring->q_handle = ice_calc_q_handle(vsi, ring, tc);
+
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
+ 1, qg_buf, buf_len, NULL);
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ return -ENODEV;
+ }
+
+ /* Add Tx Queue TEID into the VSI Tx ring from the
+ * response. This will complete configuring and
+ * enabling the queue.
+ */
+ txq = &qg_buf->txqs[0];
+ if (pf_q == le16_to_cpu(txq->txq_id))
+ ring->txq_teid = le32_to_cpu(txq->q_teid);
+
+ return 0;
+}
+
+/**
+ * ice_cfg_itr - configure the initial interrupt throttle values
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector that's being configured
+ *
+ * Configure interrupt throttling values for the ring containers that are
+ * associated with the interrupt vector passed in.
+ */
+void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
+{
+ ice_cfg_itr_gran(hw);
+
+ if (q_vector->num_ring_rx) {
+ struct ice_ring_container *rc = &q_vector->rx;
+
+ /* if this value is set then don't overwrite with default */
+ if (!rc->itr_setting)
+ rc->itr_setting = ICE_DFLT_RX_ITR;
+
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ rc->next_update = jiffies + 1;
+ rc->current_itr = rc->target_itr;
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
+ }
+
+ if (q_vector->num_ring_tx) {
+ struct ice_ring_container *rc = &q_vector->tx;
+
+ /* if this value is set then don't overwrite with default */
+ if (!rc->itr_setting)
+ rc->itr_setting = ICE_DFLT_TX_ITR;
+
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ rc->next_update = jiffies + 1;
+ rc->current_itr = rc->target_itr;
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
+ }
+}
+
+/**
+ * ice_cfg_txq_interrupt - configure interrupt on Tx queue
+ * @vsi: the VSI being configured
+ * @txq: Tx queue being mapped to MSI-X vector
+ * @msix_idx: MSI-X vector index within the function
+ * @itr_idx: ITR index of the interrupt cause
+ *
+ * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
+ * within the function space.
+ */
+void
+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
+
+ val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
+ ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
+
+ wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ u32 xdp_txq = txq + vsi->num_xdp_txq;
+
+ wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
+ val);
+ }
+ ice_flush(hw);
+}
+
+/**
+ * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
+ * @vsi: the VSI being configured
+ * @rxq: Rx queue being mapped to MSI-X vector
+ * @msix_idx: MSI-X vector index within the function
+ * @itr_idx: ITR index of the interrupt cause
+ *
+ * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
+ * within the function space.
+ */
+void
+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
+
+ val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
+ ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
+
+ wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_trigger_sw_intr - trigger a software interrupt
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector to trigger the software interrupt for
+ */
+void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
+{
+ wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
+ GLINT_DYN_CTL_SWINT_TRIG_M |
+ GLINT_DYN_CTL_INTENA_M);
+}
+
+/**
+ * ice_vsi_stop_tx_ring - Disable single Tx ring
+ * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative ID of VF/VM
+ * @ring: Tx ring to be stopped
+ * @txq_meta: Meta data of Tx ring to be stopped
+ */
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_q_vector *q_vector;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u32 val;
+
+ /* clear cause_ena bit for disabled queues */
+ val = rd32(hw, QINT_TQCTL(ring->reg_idx));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(ring->reg_idx), val);
+
+ /* software is expected to wait for 100 ns */
+ ndelay(100);
+
+ /* trigger a software interrupt for the vector
+ * associated to the queue to schedule NAPI handler
+ */
+ q_vector = ring->q_vector;
+ if (q_vector)
+ ice_trigger_sw_intr(hw, q_vector);
+
+ status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
+ txq_meta->tc, 1, &txq_meta->q_handle,
+ &txq_meta->q_id, &txq_meta->q_teid, rst_src,
+ rel_vmvf_num, NULL);
+
+ /* if the disable queue command was exercised during an
+ * active reset flow, ICE_ERR_RESET_ONGOING is returned.
+ * This is not an error as the reset operation disables
+ * queues at the hardware level anyway.
+ */
+ if (status == ICE_ERR_RESET_ONGOING) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status == ICE_ERR_DOES_NOT_EXIST) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "LAN Tx queues do not exist, nothing to disable\n");
+ } else if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to disable LAN Tx queues, error: %d\n", status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fill_txq_meta - Prepare the Tx queue's meta data
+ * @vsi: VSI that ring belongs to
+ * @ring: ring that txq_meta will be based on
+ * @txq_meta: a helper struct that wraps Tx queue's information
+ *
+ * Set up a helper struct that will contain all the necessary fields that
+ * are needed for stopping the Tx queue
+ */
+void
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta)
+{
+ u8 tc;
+
+ if (IS_ENABLED(CONFIG_DCB))
+ tc = ring->dcb_tc;
+ else
+ tc = 0;
+
+ txq_meta->q_id = ring->reg_idx;
+ txq_meta->q_teid = ring->txq_teid;
+ txq_meta->q_handle = ring->q_handle;
+ txq_meta->vsi_idx = vsi->idx;
+ txq_meta->tc = tc;
+}
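
Putting the last two helpers together, the expected teardown order for a single Tx ring is: snapshot the queue's metadata while the ring state is still valid, then pass it to the stop routine. A sketch using only signatures declared in this file (ICE_NO_RESET assumed as the no-reset source value of enum ice_disq_rst_src):

/* Sketch: stop one Tx ring outside of any reset flow. */
static int demo_stop_one_txq(struct ice_vsi *vsi, struct ice_ring *ring)
{
	struct ice_txq_meta txq_meta = { 0 };

	ice_fill_txq_meta(vsi, ring, &txq_meta);
	return ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, ring, &txq_meta);
}
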
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
new file mode 100644
index 000000000000..407995e8e944
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_BASE_H_
+#define _ICE_BASE_H_
+
+#include "ice.h"
+
+int ice_setup_rx_ctx(struct ice_ring *ring);
+int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
+int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
+int
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_aqc_add_tx_qgrp *qg_buf);
+void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
+void
+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
+void
+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
+void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta);
+void
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta);
+#endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 3a6b3950eb0e..36be501ae623 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -855,6 +855,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_sched;
}
INIT_LIST_HEAD(&hw->agg_list);
+ /* Initialize max burst size */
+ if (!hw->max_burst_size)
+ ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
status = ice_init_fltr_mgmt_struct(hw);
if (status)
@@ -1067,6 +1070,72 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
}
/**
+ * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Finds the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and returns the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ */
+enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type)
+{
+ enum ice_status status;
+ u16 pfa_len, pfa_ptr;
+ u16 next_tlv;
+
+ status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read Preserved Field Array pointer.\n");
+ return status;
+ }
+ status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
+ return status;
+ }
+ /* Starting with the first TLV after the PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ while (next_tlv < pfa_ptr + pfa_len) {
+ u16 tlv_sub_module_type;
+ u16 tlv_len;
+
+ /* Read TLV type */
+ status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
+ break;
+ }
+ /* Read TLV length */
+ status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
+ break;
+ }
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return 0;
+ }
+ return ICE_ERR_INVAL_SIZE;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length)
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
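The walk above assumes TLVs are packed as consecutive 16-bit Shadow RAM words: one type word, one length word, then `length` value words, which is why each step advances by tlv_len + 2:

/* Assumed word layout (contents invented):
 *
 *   word[pfa_ptr]               PFA length, in words
 *   word[pfa_ptr + 1]           TLV #1 type
 *   word[pfa_ptr + 2]           TLV #1 length N
 *   word[pfa_ptr + 3 .. 2 + N]  TLV #1 value
 *   word[pfa_ptr + 3 + N]       TLV #2 type, and so on
 */
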
+/**
* ice_copy_rxq_ctx_to_hw
* @hw: pointer to the hardware structure
* @ice_rxq_ctx: pointer to the rxq context
@@ -1182,56 +1251,6 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
{ 0 }
};
-/**
- * ice_debug_cq
- * @hw: pointer to the hardware structure
- * @mask: debug mask
- * @desc: pointer to control queue descriptor
- * @buf: pointer to command buffer
- * @buf_len: max length of buf
- *
- * Dumps debug log about control command with descriptor contents.
- */
-void
-ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
- u16 buf_len)
-{
- struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
- u16 len;
-
-#ifndef CONFIG_DYNAMIC_DEBUG
- if (!(mask & hw->debug_mask))
- return;
-#endif
-
- if (!desc)
- return;
-
- len = le16_to_cpu(cq_desc->datalen);
-
- ice_debug(hw, mask,
- "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
- le16_to_cpu(cq_desc->opcode),
- le16_to_cpu(cq_desc->flags),
- le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
- ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(cq_desc->cookie_high),
- le32_to_cpu(cq_desc->cookie_low));
- ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
- le32_to_cpu(cq_desc->params.generic.param0),
- le32_to_cpu(cq_desc->params.generic.param1));
- ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(cq_desc->params.generic.addr_high),
- le32_to_cpu(cq_desc->params.generic.addr_low));
- if (buf && cq_desc->datalen != 0) {
- ice_debug(hw, mask, "Buffer:\n");
- if (buf_len < len)
- len = buf_len;
-
- ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
- }
-}
-
/* FW Admin Queue command wrappers */
/* Software lock/mutex that is meant to be held while the Global Config Lock
@@ -2556,6 +2575,52 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
}
/**
+ * ice_aq_sff_eeprom
+ * @hw: pointer to the HW struct
+ * @lport: bits [7:0] = logical port, bit [8] = logical port valid
+ * @bus_addr: I2C bus address of the EEPROM (typically 0xA0, 0=topo default)
+ * @mem_addr: I2C offset: lower 8 bits for the address, upper 8 bits zero padding
+ * @page: QSFP page
+ * @set_page: set or ignore the page
+ * @data: pointer to data buffer to be read/written to the I2C device.
+ * @length: 1-16 for read, 1 for write.
+ * @write: 0 for read, 1 for write.
+ * @cd: pointer to command details structure or NULL
+ *
+ * Read/Write SFF EEPROM (0x06EE)
+ */
+enum ice_status
+ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
+ bool write, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_sff_eeprom *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!data || (mem_addr & 0xff00))
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
+ cmd = &desc.params.read_write_sff_param;
+ desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
+ cmd->lport_num = (u8)(lport & 0xff);
+ cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
+ cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
+ ICE_AQC_SFF_I2CBUS_7BIT_M) |
+ ((set_page <<
+ ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
+ ICE_AQC_SFF_SET_EEPROM_PAGE_M));
+ cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
+ cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
+ if (write)
+ cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
+
+ status = ice_aq_send_cmd(hw, &desc, data, length, cd);
+ return status;
+}
+
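A minimal usage sketch (values assumed, mirroring the ethtool path added later in this patch): read the SFF identifier byte at offset 0 of an SFP's 0xA0 address, page 0, topology-default port.

	u8 id = 0;
	enum ice_status status;

	/* lport 0 with the valid bit (bit 8) clear selects the topo default */
	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x00, 0x00, 0, &id, 1,
				   false, NULL);
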
+/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: VSI FW index
@@ -3148,7 +3213,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @tc: TC number
* @q_handle: software queue handle
*/
-static struct ice_q_ctx *
+struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
struct ice_vsi_ctx *vsi;
@@ -3245,9 +3310,12 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
node.node_teid = buf->txqs[0].q_teid;
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
q_ctx->q_handle = q_handle;
+ q_ctx->q_teid = le32_to_cpu(node.node_teid);
- /* add a leaf node into schduler tree queue layer */
+ /* add a leaf node into scheduler tree queue layer */
status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
+ if (!status)
+ status = ice_sched_replay_q_bw(pi, q_ctx);
ena_txq_exit:
mutex_unlock(&pi->sched_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index c3df92f57777..b22aa561e253 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -6,16 +6,18 @@
#include "ice.h"
#include "ice_type.h"
+#include "ice_nvm.h"
#include "ice_flex_pipe.h"
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
-void
-ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
+enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type);
enum ice_status ice_check_reset(struct ice_hw *hw);
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
@@ -117,6 +119,10 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
+ bool write, struct ice_sq_cd *cd);
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
@@ -133,6 +139,8 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
+struct ice_q_ctx *
+ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 2353166c654e..dd946866d7b8 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -810,6 +810,52 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
}
/**
+ * ice_debug_cq
+ * @hw: pointer to the hardware structure
+ * @desc: pointer to control queue descriptor
+ * @buf: pointer to command buffer
+ * @buf_len: max length of buf
+ *
+ * Dumps debug log about control command with descriptor contents.
+ */
+static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
+{
+ struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
+ u16 len;
+
+ if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
+ !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
+ return;
+
+ if (!desc)
+ return;
+
+ len = le16_to_cpu(cq_desc->datalen);
+
+ ice_debug(hw, ICE_DBG_AQ_DESC,
+ "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ le16_to_cpu(cq_desc->opcode),
+ le16_to_cpu(cq_desc->flags),
+ le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->cookie_high),
+ le32_to_cpu(cq_desc->cookie_low));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->params.generic.param0),
+ le32_to_cpu(cq_desc->params.generic.param1));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->params.generic.addr_high),
+ le32_to_cpu(cq_desc->params.generic.addr_low));
+ if (buf && cq_desc->datalen != 0) {
+ ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+
+ ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, len);
+ }
+}
+
+/**
* ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
@@ -934,10 +980,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
/* Debug desc and buffer */
- ice_debug(hw, ICE_DBG_AQ_MSG,
+ ice_debug(hw, ICE_DBG_AQ_DESC,
"ATQ: Control Send queue desc and buffer:\n");
- ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);
+ ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
(cq->sq.next_to_use)++;
if (cq->sq.next_to_use == cq->sq.count)
@@ -948,7 +994,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (ice_sq_done(hw, cq))
break;
- mdelay(1);
+ udelay(ICE_CTL_Q_SQ_CMD_USEC);
total_delay++;
} while (total_delay < cq->sq_cmd_timeout);
@@ -971,7 +1017,8 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
retval = le16_to_cpu(desc->retval);
if (retval) {
ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Send Queue command completed with error 0x%x\n",
+ "Control Send Queue command 0x%04X completed with error 0x%X\n",
+ le16_to_cpu(desc->opcode),
retval);
/* strip off FW internal code */
@@ -986,7 +1033,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
ice_debug(hw, ICE_DBG_AQ_MSG,
"ATQ: desc and buffer writeback:\n");
- ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);
+ ice_debug_cq(hw, (void *)desc, buf, buf_size);
/* save writeback AQ if requested */
if (details->wb_desc)
@@ -1075,7 +1122,8 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (flags & ICE_AQ_FLAG_ERR) {
ret_code = ICE_ERR_AQ_ERROR;
ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Receive Queue Event received with error 0x%x\n",
+ "Control Receive Queue Event 0x%04X received with error 0x%X\n",
+ le16_to_cpu(desc->opcode),
cq->rq_last_status);
}
memcpy(&e->desc, desc, sizeof(e->desc));
@@ -1084,10 +1132,9 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (e->msg_buf && e->msg_len)
memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
- ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");
+ ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
- ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
- cq->rq_buf_size);
+ ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message size
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 44945c2165d8..4df9da359135 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -31,8 +31,9 @@ enum ice_ctl_q {
ICE_CTL_Q_MAILBOX,
};
-/* Control Queue default settings */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
+/* Control Queue timeout settings - max delay 250ms */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
+#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
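The overall budget is unchanged by this hunk: 2500 polls at 100 µs apiece is still the old 250 ms ceiling, just checked at a finer granularity. A standalone sanity check (illustrative only, not part of the driver) makes the arithmetic explicit:

	/* 2500 iterations * 100 usec/iteration == 250,000 usec == 250 ms */
	_Static_assert(2500 * 100 == 250 * 1000,
		       "SQ command timeout budget must stay at 250 ms");
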
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index dd7efff121bd..713e8a892e14 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -965,9 +965,9 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
/* Get current DCBX configuration */
ret = ice_get_dcb_cfg(pi);
- pi->is_sw_lldp = (hw->adminq.sq_last_status == ICE_AQ_RC_EPERM);
if (ret)
return ret;
+ pi->is_sw_lldp = false;
} else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) {
return ICE_ERR_NOT_READY;
}
@@ -975,8 +975,8 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
/* Configure the LLDP MIB change event */
if (enable_mib_change) {
ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL);
- if (!ret)
- pi->is_sw_lldp = false;
+ if (ret)
+ pi->is_sw_lldp = true;
}
return ret;
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index dd47869c4ad4..1150dbd98d0b 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -2,6 +2,9 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_dcb_lib.h"
+#include "ice_dcb_nl.h"
+
+static void ice_pf_dcb_recfg(struct ice_pf *pf);
/**
* ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
@@ -100,6 +103,16 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
}
/**
+ * ice_dcb_get_tc - Get the TC associated with the queue
+ * @vsi: ptr to the VSI
+ * @queue_index: queue number associated with VSI
+ */
+u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index)
+{
+ return vsi->tx_rings[queue_index]->dcb_tc;
+}
+
+/**
* ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC
* @vsi: VSI owner of rings being updated
*/
@@ -138,56 +151,24 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
}
/**
- * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
- * @pf: pointer to the PF struct
- *
- * Assumed caller has already disabled all VSIs before
- * calling this function. Reconfiguring DCB based on
- * local_dcbx_cfg.
- */
-static void ice_pf_dcb_recfg(struct ice_pf *pf)
-{
- struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
- u8 tc_map = 0;
- int v, ret;
-
- /* Update each VSI */
- ice_for_each_vsi(pf, v) {
- if (!pf->vsi[v])
- continue;
-
- if (pf->vsi[v]->type == ICE_VSI_PF)
- tc_map = ice_dcb_get_ena_tc(dcbcfg);
- else
- tc_map = ICE_DFLT_TRAFFIC_CLASS;
-
- ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "Failed to config TC for VSI index: %d\n",
- pf->vsi[v]->idx);
- continue;
- }
-
- ice_vsi_map_rings_to_vectors(pf->vsi[v]);
- }
-}
-
-/**
* ice_pf_dcb_cfg - Apply new DCB configuration
* @pf: pointer to the PF struct
* @new_cfg: DCBX config to apply
* @locked: is the RTNL held
*/
-static
int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
{
- struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
- int ret = 0;
+ struct ice_dcbx_cfg *old_cfg, *curr_cfg;
+ int ret = ICE_DCB_NO_HW_CHG;
+ struct ice_vsi *pf_vsi;
curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ /* FW does not care if change happened */
+ if (!pf->hw.port_info->is_sw_lldp)
+ ret = ICE_DCB_HW_CHG_RST;
+
/* Enable DCB tagging only when more than one TC */
if (ice_dcb_get_num_tc(new_cfg) > 1) {
dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
@@ -203,18 +184,28 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
}
/* Store old config in case FW config fails */
- old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL);
- memcpy(old_cfg, curr_cfg, sizeof(*old_cfg));
+ old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL);
+ if (!old_cfg)
+ return -ENOMEM;
+
+ dev_info(&pf->pdev->dev, "Commit DCB Configuration to the hardware\n");
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi) {
+ dev_dbg(&pf->pdev->dev, "PF VSI doesn't exist\n");
+ ret = -EINVAL;
+ goto free_cfg;
+ }
/* avoid race conditions by holding the lock while disabling and
* re-enabling the VSI
*/
if (!locked)
rtnl_lock();
- ice_pf_dis_all_vsi(pf, true);
+ ice_dis_vsi(pf_vsi, true);
memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
+ memcpy(&new_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
/* Only send new config to HW if we are in SW LLDP mode. Otherwise,
* the new config came from the HW in the first place.
@@ -238,10 +229,11 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
ice_pf_dcb_recfg(pf);
out:
- ice_pf_ena_all_vsi(pf, true);
+ ice_ena_vsi(pf_vsi, true);
if (!locked)
rtnl_unlock();
- devm_kfree(&pf->pdev->dev, old_cfg);
+free_cfg:
+ kfree(old_cfg);
return ret;
}
@@ -437,9 +429,10 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
/**
* ice_dcb_sw_default_config - Apply a default DCB config
* @pf: PF to apply config to
+ * @ets_willing: configure ETS willing mode
* @locked: was this function called with RTNL held
*/
-static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
+static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
{
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *dcbcfg;
@@ -454,7 +447,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
memset(dcbcfg, 0, sizeof(*dcbcfg));
memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));
- dcbcfg->etscfg.willing = 1;
+ dcbcfg->etscfg.willing = ets_willing ? 1 : 0;
dcbcfg->etscfg.maxtcs = hw->func_caps.common_cap.maxtc;
dcbcfg->etscfg.tcbwtable[0] = 100;
dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
@@ -480,6 +473,104 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
}
/**
+ * ice_dcb_tc_contig - Check that TCs are contiguous
+ * @prio_table: pointer to priority table
+ *
+ * Check if TCs begin with TC0 and are contiguous
+ */
+static bool ice_dcb_tc_contig(u8 *prio_table)
+{
+ u8 max_tc = 0;
+ int i;
+
+ for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
+ u8 cur_tc = prio_table[i];
+
+ if (cur_tc > max_tc)
+ return false;
+ else if (cur_tc == max_tc)
+ max_tc++;
+ }
+
+ return true;
+}
+
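To see what the walk accepts (sample tables invented for illustration): a table is contiguous when the first occurrence of each TC appears in increasing order starting at TC0, and it fails at the first priority that jumps past max_tc. A standalone sketch of the same logic:

	#include <stdbool.h>

	/* Same walk as ice_dcb_tc_contig, on a caller-sized table.
	 * {0, 0, 1, 1, 2, 2, 2, 2} -> true  (TC0..TC2, contiguous)
	 * {0, 0, 2, 2, 0, 0, 0, 0} -> false (TC2 seen before TC1)
	 */
	static bool tc_contig(const unsigned char *prio_table, int n)
	{
		unsigned char max_tc = 0;
		int i;

		for (i = 0; i < n; i++) {
			if (prio_table[i] > max_tc)
				return false;
			else if (prio_table[i] == max_tc)
				max_tc++;
		}
		return true;
	}
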
+/**
+ * ice_dcb_noncontig_cfg - Configure DCB for non-contiguous TCs
+ * @pf: pointer to the PF struct
+ *
+ * If non-contiguous TCs, then configure SW DCB with TC0 and ETS non-willing
+ */
+static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
+{
+ struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ int ret;
+
+ /* Configure SW DCB default with ETS non-willing */
+ ret = ice_dcb_sw_dflt_cfg(pf, false, true);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set local DCB config %d\n", ret);
+ return ret;
+ }
+
+ /* Reconfigure with ETS willing so that FW will send LLDP MIB event */
+ dcbcfg->etscfg.willing = 1;
+ ret = ice_set_dcb_cfg(pf->hw.port_info);
+ if (ret)
+ dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
+
+ return ret;
+}
+
+/**
+ * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
+ * @pf: pointer to the PF struct
+ *
+ * The caller is assumed to have already disabled all VSIs before
+ * calling this function. DCB is reconfigured based on
+ * local_dcbx_cfg.
+ */
+static void ice_pf_dcb_recfg(struct ice_pf *pf)
+{
+ struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ u8 tc_map = 0;
+ int v, ret;
+
+ /* Update each VSI */
+ ice_for_each_vsi(pf, v) {
+ if (!pf->vsi[v])
+ continue;
+
+ if (pf->vsi[v]->type == ICE_VSI_PF) {
+ tc_map = ice_dcb_get_ena_tc(dcbcfg);
+
+ /* If DCBX requests non-contiguous TCs, then configure
+ * the default TC
+ */
+ if (!ice_dcb_tc_contig(dcbcfg->etscfg.prio_table)) {
+ tc_map = ICE_DFLT_TRAFFIC_CLASS;
+ ice_dcb_noncontig_cfg(pf);
+ }
+ } else {
+ tc_map = ICE_DFLT_TRAFFIC_CLASS;
+ }
+
+ ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to config TC for VSI index: %d\n",
+ pf->vsi[v]->idx);
+ continue;
+ }
+
+ ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ if (pf->vsi[v]->type == ICE_VSI_PF)
+ ice_dcbnl_set_all(pf->vsi[v]);
+ }
+}
+
+/**
* ice_init_pf_dcb - initialize DCB for a PF
* @pf: PF to initialize DCB for
* @locked: Was function called with RTNL held
@@ -503,11 +594,13 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
"DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
pf->hw.func_caps.common_cap.maxtc);
if (err) {
+ struct ice_vsi *pf_vsi;
+
/* FW LLDP is disabled, activate SW DCBX/LLDP mode */
dev_info(&pf->pdev->dev,
"FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
- err = ice_dcb_sw_dflt_cfg(pf, locked);
+ err = ice_dcb_sw_dflt_cfg(pf, true, locked);
if (err) {
dev_err(&pf->pdev->dev,
"Failed to set local DCB config %d\n", err);
@@ -515,6 +608,19 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
goto dcb_init_err;
}
+ /* If the FW DCBX engine is not running then Rx LLDP packets
+ * need to be redirected up the stack.
+ */
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set local DCB config\n");
+ err = -EIO;
+ goto dcb_init_err;
+ }
+
+ ice_cfg_sw_lldp(pf_vsi, false, true);
+
pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
return 0;
}
@@ -627,6 +733,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_dcbx_cfg tmp_dcbx_cfg;
bool need_reconfig = false;
struct ice_port_info *pi;
+ struct ice_vsi *pf_vsi;
u8 type;
int ret;
@@ -686,6 +793,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
&pi->local_dcbx_cfg);
+ ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
if (!need_reconfig)
return;
@@ -698,8 +806,14 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi) {
+ dev_dbg(&pf->pdev->dev, "PF VSI doesn't exist\n");
+ return;
+ }
+
rtnl_lock();
- ice_pf_dis_all_vsi(pf, true);
+ ice_dis_vsi(pf_vsi, true);
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
@@ -711,6 +825,6 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* changes in configuration update VSI */
ice_pf_dcb_recfg(pf);
- ice_pf_ena_all_vsi(pf, true);
+ ice_ena_vsi(pf_vsi, true);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 661a6f7bca64..e90e25b7da77 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -5,14 +5,21 @@
#define _ICE_DCB_LIB_H_
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
#ifdef CONFIG_DCB
-#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */
+#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */
+#define ICE_DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */
+#define ICE_DCB_NO_HW_CHG 1 /* DCB configuration did not change */
+#define ICE_DCB_HW_CHG 2 /* DCB configuration changed, no reset */
void ice_dcb_rebuild(struct ice_pf *pf);
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
+u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
+int
+ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
@@ -41,6 +48,13 @@ static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
return 1;
}
+static inline u8
+ice_dcb_get_tc(struct ice_vsi __always_unused *vsi,
+ int __always_unused queue_index)
+{
+ return 0;
+}
+
static inline int
ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)
{
@@ -49,6 +63,14 @@ ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)
}
static inline int
+ice_pf_dcb_cfg(struct ice_pf __always_unused *pf,
+ struct ice_dcbx_cfg __always_unused *new_cfg,
+ bool __always_unused locked)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
struct ice_tx_buf __always_unused *first)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
new file mode 100644
index 000000000000..3c90fc0a3feb
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -0,0 +1,933 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_dcb.h"
+#include "ice_dcb_lib.h"
+#include "ice_dcb_nl.h"
+#include <net/dcbnl.h>
+
+#define ICE_APP_PROT_ID_ROCE 0x8915
+
+/**
+ * ice_dcbnl_devreset - perform enough of an ifdown/ifup to sync DCBNL info
+ * @netdev: device associated with interface that needs reset
+ */
+static void ice_dcbnl_devreset(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ while (ice_is_reset_in_progress(pf->state))
+ usleep_range(1000, 2000);
+
+ set_bit(__ICE_DCBNL_DEVRESET, pf->state);
+ dev_close(netdev);
+ netdev_state_change(netdev);
+ dev_open(netdev, NULL);
+ netdev_state_change(netdev);
+ clear_bit(__ICE_DCBNL_DEVRESET, pf->state);
+}
+
+/**
+ * ice_dcbnl_getets - retrieve local ETS configuration
+ * @netdev: the relevant netdev
+ * @ets: struct to hold ETS configuration
+ */
+static int ice_dcbnl_getets(struct net_device *netdev, struct ieee_ets *ets)
+{
+ struct ice_dcbx_cfg *dcbxcfg;
+ struct ice_port_info *pi;
+ struct ice_pf *pf;
+
+ pf = ice_netdev_to_pf(netdev);
+ pi = pf->hw.port_info;
+ dcbxcfg = &pi->local_dcbx_cfg;
+
+ ets->willing = dcbxcfg->etscfg.willing;
+ ets->ets_cap = dcbxcfg->etscfg.maxtcs;
+ ets->cbs = dcbxcfg->etscfg.cbs;
+ memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, dcbxcfg->etscfg.prio_table, sizeof(ets->prio_tc));
+ memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable,
+ sizeof(ets->tc_reco_bw));
+ memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
+ sizeof(ets->tc_reco_tsa));
+ memcpy(ets->reco_prio_tc, dcbxcfg->etscfg.prio_table,
+ sizeof(ets->reco_prio_tc));
+
+ return 0;
+}
+
+/**
+ * ice_dcbnl_setets - set IEEE ETS configuration
+ * @netdev: pointer to relevant netdev
+ * @ets: struct to hold ETS configuration
+ */
+static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+ int bwcfg = 0, bwrec = 0;
+ int err, i, max_tc = 0;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ mutex_lock(&pf->tc_mutex);
+
+ new_cfg->etscfg.willing = ets->willing;
+ new_cfg->etscfg.cbs = ets->cbs;
+ ice_for_each_traffic_class(i) {
+ new_cfg->etscfg.tcbwtable[i] = ets->tc_tx_bw[i];
+ bwcfg += ets->tc_tx_bw[i];
+ new_cfg->etscfg.tsatable[i] = ets->tc_tsa[i];
+ new_cfg->etscfg.prio_table[i] = ets->prio_tc[i];
+ if (ets->prio_tc[i] > max_tc)
+ max_tc = ets->prio_tc[i];
+ new_cfg->etsrec.tcbwtable[i] = ets->tc_reco_bw[i];
+ bwrec += ets->tc_reco_bw[i];
+ new_cfg->etsrec.tsatable[i] = ets->tc_reco_tsa[i];
+ new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
+ }
+
+ /* max_tc is a 1-8 count of the number of TCs, not a 0-7 TC index.
+ * Add one to the value if not zero, and for zero set it to the
+ * FW's default value.
+ */
+ if (max_tc)
+ max_tc++;
+ else
+ max_tc = IEEE_8021QAZ_MAX_TCS;
+
+ new_cfg->etscfg.maxtcs = max_tc;
+
+ if (!bwcfg)
+ new_cfg->etscfg.tcbwtable[0] = 100;
+
+ if (!bwrec)
+ new_cfg->etsrec.tcbwtable[0] = 100;
+
+ err = ice_pf_dcb_cfg(pf, new_cfg, true);
+ /* return of zero indicates new cfg applied */
+ if (err == ICE_DCB_HW_CHG_RST)
+ ice_dcbnl_devreset(netdev);
+ if (err == ICE_DCB_NO_HW_CHG)
+ err = ICE_DCB_HW_CHG_RST;
+
+ mutex_unlock(&pf->tc_mutex);
+ return err;
+}
+
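A worked example of the maxtcs derivation (values invented for illustration): a prio_tc map of {0, 0, 1, 1, 2, 2, 0, 0} has a highest TC index of 2, so maxtcs becomes 3; an all-zero map leaves max_tc at 0 and falls back to IEEE_8021QAZ_MAX_TCS.

	u8 prio_tc[8] = { 0, 0, 1, 1, 2, 2, 0, 0 }; /* illustrative map */
	int max_tc = 0, i;

	for (i = 0; i < 8; i++)
		if (prio_tc[i] > max_tc)
			max_tc = prio_tc[i];
	/* max_tc == 2 here; the driver reports maxtcs = max_tc + 1 == 3 */
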
+/**
+ * ice_dcbnl_getnumtcs - Get max number of traffic classes supported
+ * @dev: pointer to netdev struct
+ * @tcid: TC ID
+ * @num: total number of TCs supported by the adapter
+ *
+ * Return the total number of TCs supported
+ */
+static int
+ice_dcbnl_getnumtcs(struct net_device *dev, int __always_unused tcid, u8 *num)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
+
+ if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
+ return -EINVAL;
+
+ *num = IEEE_8021QAZ_MAX_TCS;
+ return 0;
+}
+
+/**
+ * ice_dcbnl_getdcbx - retrieve current DCBX capability
+ * @netdev: pointer to the netdev struct
+ */
+static u8 ice_dcbnl_getdcbx(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ return pf->dcbx_cap;
+}
+
+/**
+ * ice_dcbnl_setdcbx - set required DCBX capability
+ * @netdev: the corresponding netdev
+ * @mode: required mode
+ */
+static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ /* No support for LLD_MANAGED modes or CEE+IEEE */
+ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+ ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
+ !(mode & DCB_CAP_DCBX_HOST))
+ return ICE_DCB_NO_HW_CHG;
+
+ /* Already set to the given mode; no change needed */
+ if (mode == pf->dcbx_cap)
+ return ICE_DCB_NO_HW_CHG;
+
+ pf->dcbx_cap = mode;
+ if (mode & DCB_CAP_DCBX_VER_CEE)
+ pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
+ else
+ pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
+
+ dev_info(&pf->pdev->dev, "DCBx mode = 0x%x\n", mode);
+ return ICE_DCB_HW_CHG_RST;
+}
+
+/**
+ * ice_dcbnl_get_perm_hw_addr - MAC address used by DCBX
+ * @netdev: pointer to netdev struct
+ * @perm_addr: buffer to return permanent MAC address
+ */
+static void ice_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+ int i, j;
+
+ memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
+ for (i = 0; i < netdev->addr_len; i++)
+ perm_addr[i] = pi->mac.perm_addr[i];
+
+ for (j = 0; j < netdev->addr_len; j++, i++)
+ perm_addr[i] = pi->mac.perm_addr[j];
+}
+
+/**
+ * ice_get_pfc_delay - Retrieve PFC Link Delay
+ * @hw: pointer to HW struct
+ * @delay: holds the PFC Link Delay value
+ */
+static void ice_get_pfc_delay(struct ice_hw *hw, u16 *delay)
+{
+ u32 val;
+
+ val = rd32(hw, PRTDCB_GENC);
+ *delay = (u16)((val & PRTDCB_GENC_PFCLDA_M) >> PRTDCB_GENC_PFCLDA_S);
+}
+
+/**
+ * ice_dcbnl_getpfc - retrieve local IEEE PFC config
+ * @netdev: pointer to netdev struct
+ * @pfc: struct to hold PFC info
+ */
+static int ice_dcbnl_getpfc(struct net_device *netdev, struct ieee_pfc *pfc)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+ struct ice_dcbx_cfg *dcbxcfg;
+ int i;
+
+ dcbxcfg = &pi->local_dcbx_cfg;
+ pfc->pfc_cap = dcbxcfg->pfc.pfccap;
+ pfc->pfc_en = dcbxcfg->pfc.pfcena;
+ pfc->mbc = dcbxcfg->pfc.mbc;
+ ice_get_pfc_delay(&pf->hw, &pfc->delay);
+
+ ice_for_each_traffic_class(i) {
+ pfc->requests[i] = pf->stats.priority_xoff_tx[i];
+ pfc->indications[i] = pf->stats.priority_xoff_rx[i];
+ }
+
+ return 0;
+}
+
+/**
+ * ice_dcbnl_setpfc - set local IEEE PFC config
+ * @netdev: pointer to relevant netdev
+ * @pfc: pointer to struct holding PFC config
+ */
+static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+ int err;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ mutex_lock(&pf->tc_mutex);
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ if (pfc->pfc_cap)
+ new_cfg->pfc.pfccap = pfc->pfc_cap;
+ else
+ new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
+
+ new_cfg->pfc.pfcena = pfc->pfc_en;
+
+ err = ice_pf_dcb_cfg(pf, new_cfg, true);
+ if (err == ICE_DCB_HW_CHG_RST)
+ ice_dcbnl_devreset(netdev);
+ if (err == ICE_DCB_NO_HW_CHG)
+ err = ICE_DCB_HW_CHG_RST;
+ mutex_unlock(&pf->tc_mutex);
+ return err;
+}
+
+/**
+ * ice_dcbnl_get_pfc_cfg - Get CEE PFC config
+ * @netdev: pointer to netdev struct
+ * @prio: corresponding user priority
+ * @setting: the PFC setting for given priority
+ */
+static void
+ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (prio >= ICE_MAX_USER_PRIORITY)
+ return;
+
+ *setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
+ dev_dbg(&pf->pdev->dev,
+ "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
+ prio, *setting, pi->local_dcbx_cfg.pfc.pfcena);
+}
+
+/**
+ * ice_dcbnl_set_pfc_cfg - Set CEE PFC config
+ * @netdev: the corresponding netdev
+ * @prio: User Priority
+ * @set: PFC setting to apply
+ */
+static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (prio >= ICE_MAX_USER_PRIORITY)
+ return;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
+ if (set)
+ new_cfg->pfc.pfcena |= BIT(prio);
+ else
+ new_cfg->pfc.pfcena &= ~BIT(prio);
+
+ dev_dbg(&pf->pdev->dev, "Set PFC config UP:%d set:%d pfcena:0x%x\n",
+ prio, set, new_cfg->pfc.pfcena);
+}
+
+/**
+ * ice_dcbnl_getpfcstate - get CEE PFC mode
+ * @netdev: pointer to netdev struct
+ */
+static u8 ice_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ /* Return enabled if any UP is enabled for PFC */
+ if (pi->local_dcbx_cfg.pfc.pfcena)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ice_dcbnl_getstate - get DCB enabled state
+ * @netdev: pointer to netdev struct
+ */
+static u8 ice_dcbnl_getstate(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ u8 state = 0;
+
+ state = test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+
+ dev_dbg(&pf->pdev->dev, "DCB enabled state = %d\n", state);
+ return state;
+}
+
+/**
+ * ice_dcbnl_setstate - Set CEE DCB state
+ * @netdev: pointer to relevant netdev
+ * @state: state value to set
+ */
+static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return ICE_DCB_NO_HW_CHG;
+
+ /* Nothing to do */
+ if (!!state == test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ return ICE_DCB_NO_HW_CHG;
+
+ if (state) {
+ set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ memcpy(&pf->hw.port_info->desired_dcbx_cfg,
+ &pf->hw.port_info->local_dcbx_cfg,
+ sizeof(struct ice_dcbx_cfg));
+ } else {
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ }
+
+ return ICE_DCB_HW_CHG;
+}
+
+/**
+ * ice_dcbnl_get_pg_tc_cfg_tx - get CEE PG Tx config
+ * @netdev: pointer to netdev struct
+ * @prio: the corresponding user priority
+ * @prio_type: traffic priority type
+ * @pgid: the BW group ID the traffic class belongs to
+ * @bw_pct: BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ */
+static void
+ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
+ u8 __always_unused *prio_type, u8 *pgid,
+ u8 __always_unused *bw_pct,
+ u8 __always_unused *up_map)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (prio >= ICE_MAX_USER_PRIORITY)
+ return;
+
+ *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
+ dev_dbg(&pf->pdev->dev,
+ "Get PG config prio=%d tc=%d\n", prio, *pgid);
+}
+
+/**
+ * ice_dcbnl_set_pg_tc_cfg_tx - set CEE PG Tx config
+ * @netdev: pointer to relevant netdev
+ * @tc: the corresponding traffic class
+ * @prio_type: the traffic priority type
+ * @bwg_id: the BW group ID the TC belongs to
+ * @bw_pct: the BW percentage for the BWG
+ * @up_map: prio mapped to corresponding TC
+ */
+static void
+ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
+ u8 __always_unused prio_type,
+ u8 __always_unused bwg_id,
+ u8 __always_unused bw_pct, u8 up_map)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+ int i;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (tc >= ICE_MAX_TRAFFIC_CLASS)
+ return;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ /* prio_type, bwg_id and bw_pct per UP are not supported */
+
+ ice_for_each_traffic_class(i) {
+ if (up_map & BIT(i))
+ new_cfg->etscfg.prio_table[i] = tc;
+ }
+ new_cfg->etscfg.tsatable[tc] = ICE_IEEE_TSA_ETS;
+}
+
+/**
+ * ice_dcbnl_get_pg_bwg_cfg_tx - Get CEE PGBW config
+ * @netdev: pointer to the netdev struct
+ * @pgid: corresponding traffic class
+ * @bw_pct: the BW percentage for the corresponding TC
+ */
+static void
+ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (pgid >= ICE_MAX_TRAFFIC_CLASS)
+ return;
+
+ *bw_pct = pi->local_dcbx_cfg.etscfg.tcbwtable[pgid];
+ dev_dbg(&pf->pdev->dev, "Get PG BW config tc=%d bw_pct=%d\n",
+ pgid, *bw_pct);
+}
+
+/**
+ * ice_dcbnl_set_pg_bwg_cfg_tx - set CEE PG Tx BW config
+ * @netdev: the corresponding netdev
+ * @pgid: Corresponding traffic class
+ * @bw_pct: the BW percentage for the specified TC
+ */
+static void
+ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (pgid >= ICE_MAX_TRAFFIC_CLASS)
+ return;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ new_cfg->etscfg.tcbwtable[pgid] = bw_pct;
+}
+
+/**
+ * ice_dcbnl_get_pg_tc_cfg_rx - Get CEE PG Rx config
+ * @netdev: pointer to netdev struct
+ * @prio: the corresponding user priority
+ * @prio_type: the traffic priority type
+ * @pgid: the PG ID
+ * @bw_pct: the BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ */
+static void
+ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
+ u8 __always_unused *prio_type, u8 *pgid,
+ u8 __always_unused *bw_pct,
+ u8 __always_unused *up_map)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (prio >= ICE_MAX_USER_PRIORITY)
+ return;
+
+ *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
+}
+
+/**
+ * ice_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config
+ * @netdev: pointer to netdev struct
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the corresponding TC
+ */
+static void
+ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
+ u8 *bw_pct)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ *bw_pct = 0;
+}
+
+/**
+ * ice_dcbnl_get_cap - Get DCBX capabilities of adapter
+ * @netdev: pointer to netdev struct
+ * @capid: the capability type
+ * @cap: the capability value
+ */
+static u8 ice_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)))
+ return ICE_DCB_NO_HW_CHG;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_UP2TC:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_GSP:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_BCN:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = pf->dcbx_cap;
+ break;
+ default:
+ *cap = false;
+ break;
+ }
+
+ dev_dbg(&pf->pdev->dev, "DCBX Get Capability cap=%d capval=0x%x\n",
+ capid, *cap);
+ return 0;
+}
+
+/**
+ * ice_dcbnl_getapp - get CEE APP
+ * @netdev: pointer to netdev struct
+ * @idtype: the App selector
+ * @id: the App ethtype or port number
+ */
+static int ice_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return -EINVAL;
+
+ return dcb_getapp(netdev, &app);
+}
+
+/**
+ * ice_dcbnl_find_app - Search for APP in given DCB config
+ * @cfg: struct to hold DCBX config
+ * @app: struct to hold app data to look for
+ */
+static bool
+ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg,
+ struct ice_dcb_app_priority_table *app)
+{
+ int i;
+
+ for (i = 0; i < cfg->numapps; i++) {
+ if (app->selector == cfg->app[i].selector &&
+ app->prot_id == cfg->app[i].prot_id &&
+ app->priority == cfg->app[i].priority)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_dcbnl_setapp - set local IEEE App config
+ * @netdev: relevant netdev struct
+ * @app: struct to hold app config info
+ */
+static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcb_app_priority_table new_app;
+ struct ice_dcbx_cfg *old_cfg, *new_cfg;
+ int ret;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ mutex_lock(&pf->tc_mutex);
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ old_cfg = &pf->hw.port_info->local_dcbx_cfg;
+
+ if (old_cfg->numapps == ICE_DCBX_MAX_APPS) {
+ ret = -EINVAL;
+ goto setapp_out;
+ }
+
+ ret = dcb_ieee_setapp(netdev, app);
+ if (ret)
+ goto setapp_out;
+
+ new_app.selector = app->selector;
+ new_app.prot_id = app->protocol;
+ new_app.priority = app->priority;
+ if (ice_dcbnl_find_app(old_cfg, &new_app)) {
+ ret = 0;
+ goto setapp_out;
+ }
+
+ new_cfg->app[new_cfg->numapps++] = new_app;
+ ret = ice_pf_dcb_cfg(pf, new_cfg, true);
+ /* return of zero indicates new cfg applied */
+ if (ret == ICE_DCB_HW_CHG_RST)
+ ice_dcbnl_devreset(netdev);
+ if (ret == ICE_DCB_NO_HW_CHG)
+ ret = ICE_DCB_HW_CHG_RST;
+
+setapp_out:
+ mutex_unlock(&pf->tc_mutex);
+ return ret;
+}
+
+/**
+ * ice_dcbnl_delapp - Delete local IEEE App config
+ * @netdev: relevant netdev
+ * @app: struct to hold app to delete
+ *
+ * Will not delete the first application, which is required by the FW
+ */
+static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *old_cfg, *new_cfg;
+ int i, j, ret = 0;
+
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
+ return -EINVAL;
+
+ mutex_lock(&pf->tc_mutex);
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ goto delapp_out;
+
+ old_cfg = &pf->hw.port_info->local_dcbx_cfg;
+
+ if (old_cfg->numapps == 1)
+ goto delapp_out;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ for (i = 1; i < new_cfg->numapps; i++) {
+ if (app->selector == new_cfg->app[i].selector &&
+ app->protocol == new_cfg->app[i].prot_id &&
+ app->priority == new_cfg->app[i].priority) {
+ new_cfg->app[i].selector = 0;
+ new_cfg->app[i].prot_id = 0;
+ new_cfg->app[i].priority = 0;
+ break;
+ }
+ }
+
+ /* Did not find DCB App */
+ if (i == new_cfg->numapps) {
+ ret = -EINVAL;
+ goto delapp_out;
+ }
+
+ new_cfg->numapps--;
+
+ for (j = i; j < new_cfg->numapps; j++) {
+ new_cfg->app[i].selector = old_cfg->app[j + 1].selector;
+ new_cfg->app[i].prot_id = old_cfg->app[j + 1].prot_id;
+ new_cfg->app[i].priority = old_cfg->app[j + 1].priority;
+ }
+
+ ret = ice_pf_dcb_cfg(pf, new_cfg, true);
+ /* return of zero indicates new cfg applied */
+ if (ret == ICE_DCB_HW_CHG_RST)
+ ice_dcbnl_devreset(netdev);
+ if (ret == ICE_DCB_NO_HW_CHG)
+ ret = ICE_DCB_HW_CHG_RST;
+
+delapp_out:
+ mutex_unlock(&pf->tc_mutex);
+ return ret;
+}
+
+/**
+ * ice_dcbnl_cee_set_all - Commit CEE DCB settings to HW
+ * @netdev: the corresponding netdev
+ */
+static u8 ice_dcbnl_cee_set_all(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+ int err;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return ICE_DCB_NO_HW_CHG;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ mutex_lock(&pf->tc_mutex);
+
+ err = ice_pf_dcb_cfg(pf, new_cfg, true);
+
+ mutex_unlock(&pf->tc_mutex);
+ return (err != ICE_DCB_HW_CHG_RST) ? ICE_DCB_NO_HW_CHG : err;
+}
+
+static const struct dcbnl_rtnl_ops dcbnl_ops = {
+ /* IEEE 802.1Qaz std */
+ .ieee_getets = ice_dcbnl_getets,
+ .ieee_setets = ice_dcbnl_setets,
+ .ieee_getpfc = ice_dcbnl_getpfc,
+ .ieee_setpfc = ice_dcbnl_setpfc,
+ .ieee_setapp = ice_dcbnl_setapp,
+ .ieee_delapp = ice_dcbnl_delapp,
+
+ /* CEE std */
+ .getstate = ice_dcbnl_getstate,
+ .setstate = ice_dcbnl_setstate,
+ .getpermhwaddr = ice_dcbnl_get_perm_hw_addr,
+ .setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx,
+ .setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx,
+ .getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx,
+ .getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = ice_dcbnl_get_pg_bwg_cfg_rx,
+ .setpfccfg = ice_dcbnl_set_pfc_cfg,
+ .getpfccfg = ice_dcbnl_get_pfc_cfg,
+ .setall = ice_dcbnl_cee_set_all,
+ .getcap = ice_dcbnl_get_cap,
+ .getnumtcs = ice_dcbnl_getnumtcs,
+ .getpfcstate = ice_dcbnl_getpfcstate,
+ .getapp = ice_dcbnl_getapp,
+
+ /* DCBX configuration */
+ .getdcbx = ice_dcbnl_getdcbx,
+ .setdcbx = ice_dcbnl_setdcbx,
+};
+
+/**
+ * ice_dcbnl_set_all - set all the apps and ieee data from DCBX config
+ * @vsi: pointer to VSI struct
+ */
+void ice_dcbnl_set_all(struct ice_vsi *vsi)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct ice_dcbx_cfg *dcbxcfg;
+ struct ice_port_info *pi;
+ struct dcb_app sapp;
+ struct ice_pf *pf;
+ int i;
+
+ if (!netdev)
+ return;
+
+ pf = ice_netdev_to_pf(netdev);
+ pi = pf->hw.port_info;
+
+ /* SW DCB taken care of by SW Default Config */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_HOST)
+ return;
+
+ /* DCB not enabled */
+ if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ return;
+
+ dcbxcfg = &pi->local_dcbx_cfg;
+
+ for (i = 0; i < dcbxcfg->numapps; i++) {
+ u8 prio, tc_map;
+
+ prio = dcbxcfg->app[i].priority;
+ tc_map = BIT(dcbxcfg->etscfg.prio_table[prio]);
+
+ /* Add APP only if the TC is enabled for this VSI */
+ if (tc_map & vsi->tc_cfg.ena_tc) {
+ sapp.selector = dcbxcfg->app[i].selector;
+ sapp.protocol = dcbxcfg->app[i].prot_id;
+ sapp.priority = prio;
+ dcb_ieee_setapp(netdev, &sapp);
+ }
+ }
+ /* Notify user-space of the changes */
+ dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
+}
+
+/**
+ * ice_dcbnl_vsi_del_app - Delete APP for the given VSI
+ * @vsi: pointer to the main VSI
+ * @app: APP to delete
+ *
+ * Delete the given APP from the given VSI
+ */
+static void
+ice_dcbnl_vsi_del_app(struct ice_vsi *vsi,
+ struct ice_dcb_app_priority_table *app)
+{
+ struct dcb_app sapp;
+ int err;
+
+ sapp.selector = app->selector;
+ sapp.protocol = app->prot_id;
+ sapp.priority = app->priority;
+ err = ice_dcbnl_delapp(vsi->netdev, &sapp);
+ dev_dbg(&vsi->back->pdev->dev,
+ "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
+ vsi->idx, err, app->selector, app->prot_id, app->priority);
+}
+
+/**
+ * ice_dcbnl_flush_apps - Delete all removed APPs
+ * @pf: the corresponding PF
+ * @old_cfg: old DCBX configuration data
+ * @new_cfg: new DCBX configuration data
+ *
+ * Find and delete all APPS that are not present in the passed
+ * DCB configuration
+ */
+void
+ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
+ struct ice_dcbx_cfg *new_cfg)
+{
+ struct ice_vsi *main_vsi = ice_get_main_vsi(pf);
+ int i;
+
+ if (!main_vsi)
+ return;
+
+ for (i = 0; i < old_cfg->numapps; i++) {
+ struct ice_dcb_app_priority_table app = old_cfg->app[i];
+
+ /* The APP is not available anymore, delete it */
+ if (!ice_dcbnl_find_app(new_cfg, &app))
+ ice_dcbnl_vsi_del_app(main_vsi, &app);
+ }
+}
+
+/**
+ * ice_dcbnl_setup - setup DCBNL
+ * @vsi: VSI to get associated netdev from
+ */
+void ice_dcbnl_setup(struct ice_vsi *vsi)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct ice_pf *pf;
+
+ pf = ice_netdev_to_pf(netdev);
+ if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
+ return;
+
+ netdev->dcbnl_ops = &dcbnl_ops;
+ ice_dcbnl_set_all(vsi);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.h b/drivers/net/ethernet/intel/ice/ice_dcb_nl.h
new file mode 100644
index 000000000000..6c630a362293
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_DCB_NL_H_
+#define _ICE_DCB_NL_H_
+
+#ifdef CONFIG_DCB
+void ice_dcbnl_setup(struct ice_vsi *vsi);
+void ice_dcbnl_set_all(struct ice_vsi *vsi);
+void
+ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
+ struct ice_dcbx_cfg *new_cfg);
+#else
+#define ice_dcbnl_setup(vsi) do {} while (0)
+#define ice_dcbnl_set_all(vsi) do {} while (0)
+#define ice_dcbnl_flush_apps(pf, old_cfg, new_cfg) do {} while (0)
+#endif /* CONFIG_DCB */
+
+#endif /* _ICE_DCB_NL_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 7e23034df955..1f00091f7906 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -156,6 +156,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
+ ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -623,7 +624,7 @@ static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
continue;
rx_buf = &rx_ring->rx_buf[i];
- received_buf = page_address(rx_buf->page);
+ received_buf = page_address(rx_buf->page) + rx_buf->page_offset;
if (ice_lbtest_check_frame(received_buf))
valid_frames++;
@@ -1205,11 +1206,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
status = ice_init_pf_dcb(pf, true);
if (status)
dev_warn(&pf->pdev->dev, "Fail to init DCB\n");
-
- /* Forward LLDP packets to default VSI so that they
- * are passed up the stack
- */
- ice_cfg_sw_lldp(vsi, false, true);
} else {
enum ice_status status;
bool dcbx_agent_status;
@@ -1256,6 +1252,11 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
"Fail to enable MIB change events\n");
}
}
+ if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
+ /* Bring the VSI down and back up so the Rx cfg changes take effect */
+ ice_down(vsi);
+ ice_up(vsi);
+ }
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
}
@@ -2577,6 +2578,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_ring *xdp_rings = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
@@ -2611,6 +2613,13 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
return 0;
}
+ /* If there is an AF_XDP UMEM attached to any of the Rx rings,
+ * disallow changing the number of descriptors -- regardless of
+ * whether the netdev is running or not.
+ */
+ if (ice_xsk_any_rx_ring_ena(vsi))
+ return -EBUSY;
+
while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
timeout--;
if (!timeout)
@@ -2624,6 +2633,11 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
vsi->tx_rings[i]->count = new_tx_cnt;
for (i = 0; i < vsi->alloc_rxq; i++)
vsi->rx_rings[i]->count = new_rx_cnt;
+ if (ice_is_xdp_ena_vsi(vsi))
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ vsi->xdp_rings[i]->count = new_tx_cnt;
+ vsi->num_tx_desc = new_tx_cnt;
+ vsi->num_rx_desc = new_rx_cnt;
netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
goto done;
}
@@ -2635,14 +2649,14 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
vsi->tx_rings[0]->count, new_tx_cnt);
- tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+ tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_txq,
sizeof(*tx_rings), GFP_KERNEL);
if (!tx_rings) {
err = -ENOMEM;
goto done;
}
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_txq(vsi, i) {
/* clone ring and setup updated count */
tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_cnt;
@@ -2650,15 +2664,43 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
tx_rings[i].tx_buf = NULL;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
- while (i) {
- i--;
+ while (i--)
ice_clean_tx_ring(&tx_rings[i]);
- }
devm_kfree(&pf->pdev->dev, tx_rings);
goto done;
}
}
+ if (!ice_is_xdp_ena_vsi(vsi))
+ goto process_rx;
+
+ /* alloc updated XDP resources */
+ netdev_info(netdev, "Changing XDP descriptor count from %d to %d\n",
+ vsi->xdp_rings[0]->count, new_tx_cnt);
+
+ xdp_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_xdp_txq,
+ sizeof(*xdp_rings), GFP_KERNEL);
+ if (!xdp_rings) {
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ /* clone ring and setup updated count */
+ xdp_rings[i] = *vsi->xdp_rings[i];
+ xdp_rings[i].count = new_tx_cnt;
+ xdp_rings[i].desc = NULL;
+ xdp_rings[i].tx_buf = NULL;
+ err = ice_setup_tx_ring(&xdp_rings[i]);
+ if (err) {
+ while (i--)
+ ice_clean_tx_ring(&xdp_rings[i]);
+ devm_kfree(&pf->pdev->dev, xdp_rings);
+ goto free_tx;
+ }
+ ice_set_ring_xdp(&xdp_rings[i]);
+ }
+
process_rx:
if (new_rx_cnt == vsi->rx_rings[0]->count)
goto process_link;
@@ -2667,14 +2709,14 @@ process_rx:
netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
vsi->rx_rings[0]->count, new_rx_cnt);
- rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+ rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_rxq,
sizeof(*rx_rings), GFP_KERNEL);
if (!rx_rings) {
err = -ENOMEM;
goto done;
}
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_cnt;
@@ -2712,7 +2754,7 @@ process_link:
ice_down(vsi);
if (tx_rings) {
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_txq(vsi, i) {
ice_free_tx_ring(vsi->tx_rings[i]);
*vsi->tx_rings[i] = tx_rings[i];
}
@@ -2720,7 +2762,7 @@ process_link:
}
if (rx_rings) {
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
ice_free_rx_ring(vsi->rx_rings[i]);
/* copy the real tail offset */
rx_rings[i].tail = vsi->rx_rings[i]->tail;
@@ -2737,6 +2779,16 @@ process_link:
devm_kfree(&pf->pdev->dev, rx_rings);
}
+ if (xdp_rings) {
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_free_tx_ring(vsi->xdp_rings[i]);
+ *vsi->xdp_rings[i] = xdp_rings[i];
+ }
+ devm_kfree(&pf->pdev->dev, xdp_rings);
+ }
+
+ vsi->num_tx_desc = new_tx_cnt;
+ vsi->num_rx_desc = new_rx_cnt;
ice_up(vsi);
}
goto done;
@@ -2744,7 +2796,7 @@ process_link:
free_tx:
/* error cleanup if the Rx allocations failed after getting Tx */
if (tx_rings) {
- for (i = 0; i < vsi->alloc_txq; i++)
+ ice_for_each_txq(vsi, i)
ice_free_tx_ring(&tx_rings[i]);
devm_kfree(&pf->pdev->dev, tx_rings);
}
@@ -3398,6 +3450,151 @@ ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
return __ice_set_coalesce(netdev, ec, q_num);
}
+#define ICE_I2C_EEPROM_DEV_ADDR 0xA0
+#define ICE_I2C_EEPROM_DEV_ADDR2 0xA2
+#define ICE_MODULE_TYPE_SFP 0x03
+#define ICE_MODULE_TYPE_QSFP_PLUS 0x0D
+#define ICE_MODULE_TYPE_QSFP28 0x11
+#define ICE_MODULE_SFF_ADDR_MODE 0x04
+#define ICE_MODULE_SFF_DIAG_CAPAB 0x40
+#define ICE_MODULE_REVISION_ADDR 0x01
+#define ICE_MODULE_SFF_8472_COMP 0x5E
+#define ICE_MODULE_SFF_8472_SWAP 0x5C
+#define ICE_MODULE_QSFP_MAX_LEN 640
+
+/**
+ * ice_get_module_info - get SFF module type and revision information
+ * @netdev: network interface device structure
+ * @modinfo: module EEPROM size and layout information structure
+ */
+static int
+ice_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u8 sff8472_comp = 0;
+ u8 sff8472_swap = 0;
+ u8 sff8636_rev = 0;
+ u8 value = 0;
+
+ status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
+ 0, &value, 1, 0, NULL);
+ if (status)
+ return -EIO;
+
+ switch (value) {
+ case ICE_MODULE_TYPE_SFP:
+ status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
+ ICE_MODULE_SFF_8472_COMP, 0x00, 0,
+ &sff8472_comp, 1, 0, NULL);
+ if (status)
+ return -EIO;
+ status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
+ ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
+ &sff8472_swap, 1, 0, NULL);
+ if (status)
+ return -EIO;
+
+ if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else if (sff8472_comp &&
+ (sff8472_swap & ICE_MODULE_SFF_DIAG_CAPAB)) {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ }
+ break;
+ case ICE_MODULE_TYPE_QSFP_PLUS:
+ case ICE_MODULE_TYPE_QSFP28:
+ status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
+ ICE_MODULE_REVISION_ADDR, 0x00, 0,
+ &sff8636_rev, 1, 0, NULL);
+ if (status)
+ return -EIO;
+ /* Check revision compliance */
+ if (sff8636_rev > 0x02) {
+ /* Module is SFF-8636 compliant */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
+ }
+ break;
+ default:
+ netdev_warn(netdev,
+ "SFF Module Type not recognized.\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * ice_get_module_eeprom - fill buffer with SFF EEPROM contents
+ * @netdev: network interface device structure
+ * @ee: EEPROM dump request structure
+ * @data: buffer to be filled with EEPROM contents
+ */
+static int
+ice_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ u8 addr = ICE_I2C_EEPROM_DEV_ADDR;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ bool is_sfp = false;
+ u16 offset = 0;
+ u8 value = 0;
+ u8 page = 0;
+ int i;
+
+ status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0,
+ &value, 1, 0, NULL);
+ if (status)
+ return -EIO;
+
+ if (!ee || !ee->len || !data)
+ return -EINVAL;
+
+ if (value == ICE_MODULE_TYPE_SFP)
+ is_sfp = true;
+
+ for (i = 0; i < ee->len; i++) {
+ offset = i + ee->offset;
+
+ /* Check if we need to access the other memory page */
+ if (is_sfp) {
+ if (offset >= ETH_MODULE_SFF_8079_LEN) {
+ offset -= ETH_MODULE_SFF_8079_LEN;
+ addr = ICE_I2C_EEPROM_DEV_ADDR2;
+ }
+ } else {
+ while (offset >= ETH_MODULE_SFF_8436_LEN) {
+ /* Compute memory page number and offset. */
+ offset -= ETH_MODULE_SFF_8436_LEN / 2;
+ page++;
+ }
+ }
+
+ status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, !is_sfp,
+ &value, 1, 0, NULL);
+ if (status)
+ value = 0;
+ data[i] = value;
+ }
+ return 0;
+}
+
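The paging arithmetic in ice_get_module_eeprom() is the only subtle part of the dump: SFP modules (SFF-8472) expose their diagnostic bytes at a second I2C device address, while QSFP modules (SFF-8636) bank everything behind one address using 128-byte upper pages. A minimal standalone sketch of that mapping, assuming only the constants mirrored from the defines above (none of this is driver code):

#include <stdio.h>

#define SFF_8079_LEN    256     /* lower EEPROM page size */
#define SFF_8436_LEN    256     /* QSFP base page size */
#define DEV_ADDR        0xA0    /* ICE_I2C_EEPROM_DEV_ADDR */
#define DEV_ADDR2       0xA2    /* ICE_I2C_EEPROM_DEV_ADDR2 */

static void map_offset(unsigned int off, int is_sfp)
{
        unsigned int addr = DEV_ADDR, page = 0;

        if (is_sfp) {
                /* SFP: diagnostics live at the second device address */
                if (off >= SFF_8079_LEN) {
                        off -= SFF_8079_LEN;
                        addr = DEV_ADDR2;
                }
        } else {
                /* QSFP: upper pages are 128 bytes each, same address */
                while (off >= SFF_8436_LEN) {
                        off -= SFF_8436_LEN / 2;
                        page++;
                }
        }
        printf("addr=0x%02x page=%u offset=%u\n", addr, page, off);
}

int main(void)
{
        map_offset(300, 1);     /* SFP:  addr=0xa2 page=0 offset=44 */
        map_offset(300, 0);     /* QSFP: addr=0xa0 page=1 offset=172 */
        return 0;
}

Once these ops are wired into ice_ethtool_ops below, userspace retrieves the dump with ethtool -m <iface>.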
static const struct ethtool_ops ice_ethtool_ops = {
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
@@ -3433,6 +3630,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_per_queue_coalesce = ice_set_per_q_coalesce,
.get_fecparam = ice_get_fecparam,
.set_fecparam = ice_set_fecparam,
+ .get_module_info = ice_get_module_info,
+ .get_module_eeprom = ice_get_module_eeprom,
};
static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 152fbd556e9b..e8f32350fed2 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -52,6 +52,9 @@
#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
+#define PRTDCB_GENC 0x00083000
+#define PRTDCB_GENC_PFCLDA_S 16
+#define PRTDCB_GENC_PFCLDA_M ICE_M(0xFFFF, 16)
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
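These new defines follow the header's shift/mask convention: ICE_M(m, s) is assumed here to expand to ((m) << (s)), as elsewhere in this file, so PRTDCB_GENC_PFCLDA_M selects bits 31:16. A standalone sketch of extracting such a field from a raw register value (the readout value is made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define ICE_M(m, s)     ((m) << (s))            /* assumed mask builder */
#define PFCLDA_S        16
#define PFCLDA_M        ICE_M(0xFFFFu, 16)      /* bits 31:16 */

int main(void)
{
        uint32_t genc = 0x00C80000;     /* made-up PRTDCB_GENC readout */

        /* isolate the 16-bit field and shift it down to its value */
        printf("pfclda = 0x%04x\n",
               (unsigned int)((genc & PFCLDA_M) >> PFCLDA_S)); /* 0x00c8 */
        return 0;
}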
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 2aac8f13daeb..ad34f22d44ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -211,7 +211,7 @@ enum ice_flex_rx_mdid {
/* Rx/Tx Flag64 packet flag bits */
enum ice_flg64_bits {
ICE_FLG_PKT_DSI = 0,
- ICE_FLG_EVLAN_x8100 = 15,
+ ICE_FLG_EVLAN_x8100 = 14,
ICE_FLG_EVLAN_x9100,
ICE_FLG_VLAN_x8100,
ICE_FLG_TNL_MAC = 22,
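Note that this one-line fix renumbers more than one flag: C enumerators without an initializer continue from the previous value, so lowering ICE_FLG_EVLAN_x8100 from 15 to 14 also shifts ICE_FLG_EVLAN_x9100 and ICE_FLG_VLAN_x8100 down to 15 and 16, while the explicitly assigned ICE_FLG_TNL_MAC = 22 stays put. A tiny illustration of the rule:

enum demo_bits {
        DEMO_A = 0,
        DEMO_B = 14,    /* was 15 before the fix */
        DEMO_C,         /* implicitly 15 (was 16) */
        DEMO_D,         /* implicitly 16 (was 17) */
        DEMO_E = 22,    /* explicit initializers do not move */
};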
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index cc755382df25..d71f7ce0a265 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2,232 +2,26 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
/**
- * ice_setup_rx_ctx - Configure a receive ring context
- * @ring: The Rx ring to configure
- *
- * Configure the Rx descriptor ring in RLAN context.
- */
-static int ice_setup_rx_ctx(struct ice_ring *ring)
-{
- struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
- u32 rxdid = ICE_RXDID_FLEX_NIC;
- struct ice_rlan_ctx rlan_ctx;
- u32 regval;
- u16 pf_q;
- int err;
-
- /* what is Rx queue number in global space of 2K Rx queues */
- pf_q = vsi->rxq_map[ring->q_index];
-
- /* clear the context structure first */
- memset(&rlan_ctx, 0, sizeof(rlan_ctx));
-
- rlan_ctx.base = ring->dma >> 7;
-
- rlan_ctx.qlen = ring->count;
-
- /* Receive Packet Data Buffer Size.
- * The Packet Data Buffer Size is defined in 128 byte units.
- */
- rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
-
- /* use 32 byte descriptors */
- rlan_ctx.dsize = 1;
-
- /* Strip the Ethernet CRC bytes before the packet is posted to host
- * memory.
- */
- rlan_ctx.crcstrip = 1;
-
- /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
- rlan_ctx.l2tsel = 1;
-
- rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
- rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
- rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
-
- /* This controls whether VLAN is stripped from inner headers
- * The VLAN in the inner L2 header is stripped to the receive
- * descriptor if enabled by this flag.
- */
- rlan_ctx.showiv = 0;
-
- /* Max packet size for this queue - must not be set to a larger value
- * than 5 x DBUF
- */
- rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
- ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
-
- /* Rx queue threshold in units of 64 */
- rlan_ctx.lrxqthresh = 1;
-
- /* Enable Flexible Descriptors in the queue context which
- * allows this driver to select a specific receive descriptor format
- */
- if (vsi->type != ICE_VSI_VF) {
- regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
- regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
- QRXFLXP_CNTXT_RXDID_IDX_M;
-
- /* increasing context priority to pick up profile ID;
- * default is 0x01; setting to 0x03 to ensure profile
- * is programming if prev context is of same priority
- */
- regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
-
- wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
- }
-
- /* Absolute queue number out of 2K needs to be passed */
- err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
- if (err) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
- pf_q, err);
- return -EIO;
- }
-
- if (vsi->type == ICE_VSI_VF)
- return 0;
-
- /* init queue specific tail register */
- ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
- writel(0, ring->tail);
- ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
-
- return 0;
-}
-
-/**
- * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
- * @ring: The Tx ring to configure
- * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
- * @pf_q: queue index in the PF space
- *
- * Configure the Tx descriptor ring in TLAN context.
+ * ice_vsi_type_str - maps VSI type enum to string equivalents
+ * @type: VSI type enum
*/
-static void
-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+const char *ice_vsi_type_str(enum ice_vsi_type type)
{
- struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
-
- tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
-
- tlan_ctx->port_num = vsi->port_info->lport;
-
- /* Transmit Queue Length */
- tlan_ctx->qlen = ring->count;
-
- ice_set_cgd_num(tlan_ctx, ring);
-
- /* PF number */
- tlan_ctx->pf_num = hw->pf_id;
-
- /* queue belongs to a specific VSI type
- * VF / VM index should be programmed per vmvf_type setting:
- * for vmvf_type = VF, it is VF number between 0-256
- * for vmvf_type = VM, it is VM number between 0-767
- * for PF or EMP this field should be set to zero
- */
- switch (vsi->type) {
- case ICE_VSI_LB:
- /* fall through */
+ switch (type) {
case ICE_VSI_PF:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
- break;
+ return "ICE_VSI_PF";
case ICE_VSI_VF:
- /* Firmware expects vmvf_num to be absolute VF ID */
- tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
- break;
+ return "ICE_VSI_VF";
+ case ICE_VSI_LB:
+ return "ICE_VSI_LB";
default:
- return;
+ return "unknown";
}
-
- /* make sure the context is associated with the right VSI */
- tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
-
- tlan_ctx->tso_ena = ICE_TX_LEGACY;
- tlan_ctx->tso_qnum = pf_q;
-
- /* Legacy or Advanced Host Interface:
- * 0: Advanced Host Interface
- * 1: Legacy Host Interface
- */
- tlan_ctx->legacy_int = ICE_TX_LEGACY;
-}
-
-/**
- * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
- * @pf: the PF being configured
- * @pf_q: the PF queue
- * @ena: enable or disable state of the queue
- *
- * This routine will wait for the given Rx queue of the PF to reach the
- * enabled or disabled state.
- * Returns -ETIMEDOUT in case of failing to reach the requested state after
- * multiple retries; else will return 0 in case of success.
- */
-static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
-{
- int i;
-
- for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
- if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
- QRX_CTRL_QENA_STAT_M))
- return 0;
-
- usleep_range(20, 40);
- }
-
- return -ETIMEDOUT;
-}
-
-/**
- * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
- * @vsi: the VSI being configured
- * @ena: start or stop the Rx rings
- * @rxq_idx: Rx queue index
- */
-#ifndef CONFIG_PCI_IOV
-static
-#endif /* !CONFIG_PCI_IOV */
-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
-{
- int pf_q = vsi->rxq_map[rxq_idx];
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- int ret = 0;
- u32 rx_reg;
-
- rx_reg = rd32(hw, QRX_CTRL(pf_q));
-
- /* Skip if the queue is already in the requested state */
- if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
- return 0;
-
- /* turn on/off the queue */
- if (ena)
- rx_reg |= QRX_CTRL_QENA_REQ_M;
- else
- rx_reg &= ~QRX_CTRL_QENA_REQ_M;
- wr32(hw, QRX_CTRL(pf_q), rx_reg);
-
- /* wait for the change to finish */
- ret = ice_pf_rxq_wait(pf, pf_q, ena);
- if (ret)
- dev_err(&pf->pdev->dev,
- "VSI idx %d Rx ring %d %sable timeout\n",
- vsi->idx, pf_q, (ena ? "en" : "dis"));
-
- return ret;
}
/**
@@ -270,7 +64,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->rx_rings)
goto err_rings;
- vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+ /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
+ vsi->txq_map = devm_kcalloc(&pf->pdev->dev, (2 * vsi->alloc_txq),
sizeof(*vsi->txq_map), GFP_KERNEL);
if (!vsi->txq_map)
@@ -281,7 +76,6 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->rxq_map)
goto err_rxq_map;
-
/* There is no need to allocate q_vectors for a loopback VSI. */
if (vsi->type == ICE_VSI_LB)
return 0;
@@ -606,88 +400,6 @@ unlock_pf:
}
/**
- * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
- * @qs_cfg: gathered variables needed for PF->VSI queues assignment
- *
- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
- */
-static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
-{
- int offset, i;
-
- mutex_lock(qs_cfg->qs_mutex);
- offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
- 0, qs_cfg->q_count, 0);
- if (offset >= qs_cfg->pf_map_size) {
- mutex_unlock(qs_cfg->qs_mutex);
- return -ENOMEM;
- }
-
- bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
- for (i = 0; i < qs_cfg->q_count; i++)
- qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
- mutex_unlock(qs_cfg->qs_mutex);
-
- return 0;
-}
-
-/**
- * __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI
- * @qs_cfg: gathered variables needed for pf->vsi queues assignment
- *
- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
- */
-static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
-{
- int i, index = 0;
-
- mutex_lock(qs_cfg->qs_mutex);
- for (i = 0; i < qs_cfg->q_count; i++) {
- index = find_next_zero_bit(qs_cfg->pf_map,
- qs_cfg->pf_map_size, index);
- if (index >= qs_cfg->pf_map_size)
- goto err_scatter;
- set_bit(index, qs_cfg->pf_map);
- qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
- }
- mutex_unlock(qs_cfg->qs_mutex);
-
- return 0;
-err_scatter:
- for (index = 0; index < i; index++) {
- clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
- qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
- }
- mutex_unlock(qs_cfg->qs_mutex);
-
- return -ENOMEM;
-}
-
-/**
- * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
- * @qs_cfg: gathered variables needed for pf->vsi queues assignment
- *
- * This function first tries to find contiguous space. If it is not successful,
- * it tries with the scatter approach.
- *
- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
- */
-static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
-{
- int ret = 0;
-
- ret = __ice_vsi_get_qs_contig(qs_cfg);
- if (ret) {
- /* contig failed, so try with scatter approach */
- qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
- qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
- qs_cfg->scatter_count);
- ret = __ice_vsi_get_qs_sc(qs_cfg);
- }
- return ret;
-}
-
-/**
* ice_vsi_get_qs - Assign queues from PF to VSI
* @vsi: the VSI to assign queues to
*
@@ -1006,7 +718,8 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
case ICE_VSI_LB:
- dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type);
+ dev_dbg(&pf->pdev->dev, "Unsupported VSI type %s\n",
+ ice_vsi_type_str(vsi->type));
return;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
@@ -1098,129 +811,6 @@ static int ice_vsi_init(struct ice_vsi *vsi)
}
/**
- * ice_free_q_vector - Free memory allocated for a specific interrupt vector
- * @vsi: VSI having the memory freed
- * @v_idx: index of the vector to be freed
- */
-static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
-{
- struct ice_q_vector *q_vector;
- struct ice_pf *pf = vsi->back;
- struct ice_ring *ring;
-
- if (!vsi->q_vectors[v_idx]) {
- dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
- v_idx);
- return;
- }
- q_vector = vsi->q_vectors[v_idx];
-
- ice_for_each_ring(ring, q_vector->tx)
- ring->q_vector = NULL;
- ice_for_each_ring(ring, q_vector->rx)
- ring->q_vector = NULL;
-
- /* only VSI with an associated netdev is set up with NAPI */
- if (vsi->netdev)
- netif_napi_del(&q_vector->napi);
-
- devm_kfree(&pf->pdev->dev, q_vector);
- vsi->q_vectors[v_idx] = NULL;
-}
-
-/**
- * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
- * @vsi: the VSI having memory freed
- */
-void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
-{
- int v_idx;
-
- ice_for_each_q_vector(vsi, v_idx)
- ice_free_q_vector(vsi, v_idx);
-}
-
-/**
- * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
- * @vsi: the VSI being configured
- * @v_idx: index of the vector in the VSI struct
- *
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
- */
-static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_q_vector *q_vector;
-
- /* allocate q_vector */
- q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
- if (!q_vector)
- return -ENOMEM;
-
- q_vector->vsi = vsi;
- q_vector->v_idx = v_idx;
- if (vsi->type == ICE_VSI_VF)
- goto out;
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
-
- /* This will not be called in the driver load path because the netdev
- * will not be created yet. All other cases with register the NAPI
- * handler here (i.e. resume, reset/rebuild, etc.)
- */
- if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
-
-out:
- /* tie q_vector and VSI together */
- vsi->q_vectors[v_idx] = q_vector;
-
- return 0;
-}
-
-/**
- * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
- * @vsi: the VSI being configured
- *
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
- */
-static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int v_idx = 0, num_q_vectors;
- int err;
-
- if (vsi->q_vectors[0]) {
- dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
- vsi->vsi_num);
- return -EEXIST;
- }
-
- num_q_vectors = vsi->num_q_vectors;
-
- for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = ice_vsi_alloc_q_vector(vsi, v_idx);
- if (err)
- goto err_out;
- }
-
- return 0;
-
-err_out:
- while (v_idx--)
- ice_free_q_vector(vsi, v_idx);
-
- dev_err(&pf->pdev->dev,
- "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
- vsi->num_q_vectors, vsi->vsi_num, err);
- vsi->num_q_vectors = 0;
- return err;
-}
-
-/**
* ice_vsi_setup_vector_base - Set up the base vector for the given VSI
* @vsi: ptr to the VSI
*
@@ -1341,66 +931,6 @@ err_out:
}
/**
- * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
- * @vsi: the VSI being configured
- *
- * This function maps descriptor rings to the queue-specific vectors allotted
- * through the MSI-X enabling code. On a constrained vector budget, we map Tx
- * and Rx rings to the vector as "efficiently" as possible.
- */
-#ifdef CONFIG_DCB
-void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
-#else
-static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
-#endif /* CONFIG_DCB */
-{
- int q_vectors = vsi->num_q_vectors;
- int tx_rings_rem, rx_rings_rem;
- int v_id;
-
- /* initially assigning remaining rings count to VSIs num queue value */
- tx_rings_rem = vsi->num_txq;
- rx_rings_rem = vsi->num_rxq;
-
- for (v_id = 0; v_id < q_vectors; v_id++) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
- int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
-
- /* Tx rings mapping to vector */
- tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
- q_vector->num_ring_tx = tx_rings_per_v;
- q_vector->tx.ring = NULL;
- q_vector->tx.itr_idx = ICE_TX_ITR;
- q_base = vsi->num_txq - tx_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
- struct ice_ring *tx_ring = vsi->tx_rings[q_id];
-
- tx_ring->q_vector = q_vector;
- tx_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = tx_ring;
- }
- tx_rings_rem -= tx_rings_per_v;
-
- /* Rx rings mapping to vector */
- rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
- q_vector->num_ring_rx = rx_rings_per_v;
- q_vector->rx.ring = NULL;
- q_vector->rx.itr_idx = ICE_RX_ITR;
- q_base = vsi->num_rxq - rx_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
- struct ice_ring *rx_ring = vsi->rx_rings[q_id];
-
- rx_ring->q_vector = q_vector;
- rx_ring->next = q_vector->rx.ring;
- q_vector->rx.ring = rx_ring;
- }
- rx_rings_rem -= rx_rings_per_v;
- }
-}
-
-/**
* ice_vsi_manage_rss_lut - disable/enable RSS
* @vsi: the VSI being changed
* @ena: boolean value indicating if this is an enable or disable request
@@ -1674,6 +1204,31 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
}
/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+ vsi->rx_buf_len = ICE_RXBUF_2048;
+#if (PAGE_SIZE < 8192)
+ } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+#if (PAGE_SIZE < 8192)
+ vsi->rx_buf_len = ICE_RXBUF_3072;
+#else
+ vsi->rx_buf_len = ICE_RXBUF_2048;
+#endif
+ }
+}
+
+/**
* ice_vsi_cfg_rxqs - Configure the VSI for Rx
* @vsi: the VSI being configured
*
@@ -1687,13 +1242,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_VF)
goto setup_rings;
- if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
- vsi->max_frame = vsi->netdev->mtu +
- ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- else
- vsi->max_frame = ICE_RXBUF_2048;
-
- vsi->rx_buf_len = ICE_RXBUF_2048;
+ ice_vsi_cfg_frame_size(vsi);
setup_rings:
/* set up individual rings */
for (i = 0; i < vsi->num_rxq; i++) {
@@ -1712,101 +1261,34 @@ setup_rings:
}
/**
- * ice_vsi_cfg_txq - Configure single Tx queue
- * @vsi: the VSI that queue belongs to
- * @ring: Tx ring to be configured
- * @tc_q_idx: queue index within given TC
- * @qg_buf: queue group buffer
- * @tc: TC that Tx ring belongs to
- */
-static int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
- struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)
-{
- struct ice_tlan_ctx tlan_ctx = { 0 };
- struct ice_aqc_add_txqs_perq *txq;
- struct ice_pf *pf = vsi->back;
- u8 buf_len = sizeof(*qg_buf);
- enum ice_status status;
- u16 pf_q;
-
- pf_q = ring->reg_idx;
- ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
- /* copy context contents into the qg_buf */
- qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
-
- /* init queue specific tail reg. It is referred as
- * transmit comm scheduler queue doorbell.
- */
- ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
-
- /* Add unique software queue handle of the Tx queue per
- * TC into the VSI Tx ring
- */
- ring->q_handle = tc_q_idx;
-
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
- 1, qg_buf, buf_len, NULL);
- if (status) {
- dev_err(&pf->pdev->dev,
- "Failed to set LAN Tx queue context, error: %d\n",
- status);
- return -ENODEV;
- }
-
- /* Add Tx Queue TEID into the VSI Tx ring from the
- * response. This will complete configuring and
- * enabling the queue.
- */
- txq = &qg_buf->txqs[0];
- if (pf_q == le16_to_cpu(txq->txq_id))
- ring->txq_teid = le32_to_cpu(txq->q_teid);
-
- return 0;
-}
-
-/**
* ice_vsi_cfg_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured
* @rings: Tx ring array to be configured
- * @offset: offset within vsi->txq_map
*
* Return 0 on success and a negative value on error
* Configure the Tx VSI for operation.
*/
static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
- struct ice_pf *pf = vsi->back;
- u16 q_idx = 0, i;
+ u16 q_idx = 0;
int err = 0;
- u8 tc;
- qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL);
+ qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
qg_buf->num_txqs = 1;
- /* set up and configure the Tx queues for each enabled TC */
- ice_for_each_traffic_class(tc) {
- if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
- break;
-
- for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
- err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset,
- qg_buf, tc);
- if (err)
- goto err_cfg_txqs;
-
- q_idx++;
- }
+ for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+ if (err)
+ goto err_cfg_txqs;
}
+
err_cfg_txqs:
- devm_kfree(&pf->pdev->dev, qg_buf);
+ kfree(qg_buf);
return err;
}
@@ -1819,159 +1301,46 @@ err_cfg_txqs:
*/
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
-}
-
-/**
- * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
- * @intrl: interrupt rate limit in usecs
- * @gran: interrupt rate limit granularity in usecs
- *
- * This function converts a decimal interrupt rate limit in usecs to the format
- * expected by firmware.
- */
-u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
-{
- u32 val = intrl / gran;
-
- if (val)
- return val | GLINT_RATE_INTRL_ENA_M;
- return 0;
-}
-
-/**
- * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
- * @hw: board specific structure
- */
-static void ice_cfg_itr_gran(struct ice_hw *hw)
-{
- u32 regval = rd32(hw, GLINT_CTL);
-
- /* no need to update global register if ITR gran is already set */
- if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
- (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
- GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
- GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
- GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
- GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
- return;
-
- regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
- GLINT_CTL_ITR_GRAN_200_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
- GLINT_CTL_ITR_GRAN_100_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
- GLINT_CTL_ITR_GRAN_50_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
- GLINT_CTL_ITR_GRAN_25_M);
- wr32(hw, GLINT_CTL, regval);
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
}
/**
- * ice_cfg_itr - configure the initial interrupt throttle values
- * @hw: pointer to the HW structure
- * @q_vector: interrupt vector that's being configured
- *
- * Configure interrupt throttling values for the ring containers that are
- * associated with the interrupt vector passed in.
- */
-static void
-ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
-{
- ice_cfg_itr_gran(hw);
-
- if (q_vector->num_ring_rx) {
- struct ice_ring_container *rc = &q_vector->rx;
-
- /* if this value is set then don't overwrite with default */
- if (!rc->itr_setting)
- rc->itr_setting = ICE_DFLT_RX_ITR;
-
- rc->target_itr = ITR_TO_REG(rc->itr_setting);
- rc->next_update = jiffies + 1;
- rc->current_itr = rc->target_itr;
- wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
- ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
- }
-
- if (q_vector->num_ring_tx) {
- struct ice_ring_container *rc = &q_vector->tx;
-
- /* if this value is set then don't overwrite with default */
- if (!rc->itr_setting)
- rc->itr_setting = ICE_DFLT_TX_ITR;
-
- rc->target_itr = ITR_TO_REG(rc->itr_setting);
- rc->next_update = jiffies + 1;
- rc->current_itr = rc->target_itr;
- wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
- ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
- }
-}
-
-/**
- * ice_cfg_txq_interrupt - configure interrupt on Tx queue
+ * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
* @vsi: the VSI being configured
- * @txq: Tx queue being mapped to MSI-X vector
- * @msix_idx: MSI-X vector index within the function
- * @itr_idx: ITR index of the interrupt cause
*
- * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
- * within the function space.
+ * Return 0 on success and a negative value on error
+ * Configure the Tx queues dedicated for XDP in given VSI for operation.
*/
-#ifdef CONFIG_PCI_IOV
-void
-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
-#else
-static void
-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
-#endif /* CONFIG_PCI_IOV */
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- u32 val;
+ int ret;
+ int i;
- itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
+ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
+ if (ret)
+ return ret;
- val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
- ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ vsi->xdp_rings[i]->xsk_umem = ice_xsk_umem(vsi->xdp_rings[i]);
- wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+ return ret;
}
/**
- * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
- * @vsi: the VSI being configured
- * @rxq: Rx queue being mapped to MSI-X vector
- * @msix_idx: MSI-X vector index within the function
- * @itr_idx: ITR index of the interrupt cause
+ * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
+ * @intrl: interrupt rate limit in usecs
+ * @gran: interrupt rate limit granularity in usecs
*
- * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
- * within the function space.
+ * This function converts a decimal interrupt rate limit in usecs to the format
+ * expected by firmware.
*/
-#ifdef CONFIG_PCI_IOV
-void
-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
-#else
-static void
-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
-#endif /* CONFIG_PCI_IOV */
+u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- u32 val;
-
- itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
-
- val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
- ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
-
- wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+ u32 val = intrl / gran;
- ice_flush(hw);
+ if (val)
+ return val | GLINT_RATE_INTRL_ENA_M;
+ return 0;
}
/**
@@ -2134,109 +1503,6 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
}
/**
- * ice_trigger_sw_intr - trigger a software interrupt
- * @hw: pointer to the HW structure
- * @q_vector: interrupt vector to trigger the software interrupt for
- */
-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
-{
- wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
- (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
- GLINT_DYN_CTL_SWINT_TRIG_M |
- GLINT_DYN_CTL_INTENA_M);
-}
-
-/**
- * ice_vsi_stop_tx_ring - Disable single Tx ring
- * @vsi: the VSI being configured
- * @rst_src: reset source
- * @rel_vmvf_num: Relative ID of VF/VM
- * @ring: Tx ring to be stopped
- * @txq_meta: Meta data of Tx ring to be stopped
- */
-#ifndef CONFIG_PCI_IOV
-static
-#endif /* !CONFIG_PCI_IOV */
-int
-ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_q_vector *q_vector;
- struct ice_hw *hw = &pf->hw;
- enum ice_status status;
- u32 val;
-
- /* clear cause_ena bit for disabled queues */
- val = rd32(hw, QINT_TQCTL(ring->reg_idx));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(ring->reg_idx), val);
-
- /* software is expected to wait for 100 ns */
- ndelay(100);
-
- /* trigger a software interrupt for the vector
- * associated to the queue to schedule NAPI handler
- */
- q_vector = ring->q_vector;
- if (q_vector)
- ice_trigger_sw_intr(hw, q_vector);
-
- status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
- txq_meta->tc, 1, &txq_meta->q_handle,
- &txq_meta->q_id, &txq_meta->q_teid, rst_src,
- rel_vmvf_num, NULL);
-
- /* if the disable queue command was exercised during an
- * active reset flow, ICE_ERR_RESET_ONGOING is returned.
- * This is not an error as the reset operation disables
- * queues at the hardware level anyway.
- */
- if (status == ICE_ERR_RESET_ONGOING) {
- dev_dbg(&vsi->back->pdev->dev,
- "Reset in progress. LAN Tx queues already disabled\n");
- } else if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(&vsi->back->pdev->dev,
- "LAN Tx queues do not exist, nothing to disable\n");
- } else if (status) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to disable LAN Tx queues, error: %d\n", status);
- return -ENODEV;
- }
-
- return 0;
-}
-
-/**
- * ice_fill_txq_meta - Prepare the Tx queue's meta data
- * @vsi: VSI that ring belongs to
- * @ring: ring that txq_meta will be based on
- * @txq_meta: a helper struct that wraps Tx queue's information
- *
- * Set up a helper struct that will contain all the necessary fields that
- * are needed for stopping Tx queue
- */
-#ifndef CONFIG_PCI_IOV
-static
-#endif /* !CONFIG_PCI_IOV */
-void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta)
-{
- u8 tc = 0;
-
-#ifdef CONFIG_DCB
- tc = ring->dcb_tc;
-#endif /* CONFIG_DCB */
- txq_meta->q_id = ring->reg_idx;
- txq_meta->q_teid = ring->txq_teid;
- txq_meta->q_handle = ring->q_handle;
- txq_meta->vsi_idx = vsi->idx;
- txq_meta->tc = tc;
-}
-
-/**
* ice_vsi_stop_tx_rings - Disable Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
@@ -2247,34 +1513,24 @@ static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num, struct ice_ring **rings)
{
- u16 i, q_idx = 0;
- int status;
- u8 tc;
+ u16 q_idx;
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
return -EINVAL;
- /* set up the Tx queue list to be disabled for each enabled TC */
- ice_for_each_traffic_class(tc) {
- if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
- break;
-
- for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
- struct ice_txq_meta txq_meta = { };
+ for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ struct ice_txq_meta txq_meta = { };
+ int status;
- if (!rings || !rings[q_idx])
- return -EINVAL;
+ if (!rings || !rings[q_idx])
+ return -EINVAL;
- ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
- status = ice_vsi_stop_tx_ring(vsi, rst_src,
- rel_vmvf_num,
- rings[q_idx], &txq_meta);
+ ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
+ status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
+ rings[q_idx], &txq_meta);
- if (status)
- return status;
-
- q_idx++;
- }
+ if (status)
+ return status;
}
return 0;
@@ -2294,6 +1550,15 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
}
/**
+ * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
+ * @vsi: the VSI being configured
+ */
+int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
+{
+ return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
+}
+
+/**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
@@ -2635,23 +1900,17 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* out PAUSE or PFC frames. If enabled, FW can still send FC frames.
* The rule is added once for PF VSI in order to create appropriate
* recipe, since VSI/VSI list is ignored with drop action...
- * Also add rules to handle LLDP Tx and Rx packets. Tx LLDP packets
- * need to be dropped so that VFs cannot send LLDP packets to reconfig
- * DCB settings in the HW. Also, if the FW DCBX engine is not running
- * then Rx LLDP packets need to be redirected up the stack.
+ * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to
+ * be dropped so that VFs cannot send LLDP packets to reconfig DCB
+ * settings in the HW.
*/
- if (!ice_is_safe_mode(pf)) {
+ if (!ice_is_safe_mode(pf))
if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, true);
/* Tx LLDP packets */
ice_cfg_sw_lldp(vsi, true, true);
-
- /* Rx LLDP packets */
- if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, true);
}
- }
return vsi;
@@ -2690,6 +1949,11 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
for (q = 0; q < q_vector->num_ring_tx; q++) {
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ u32 xdp_txq = txq + vsi->num_xdp_txq;
+
+ wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
+ }
txq++;
}
@@ -2790,6 +2054,62 @@ void ice_vsi_close(struct ice_vsi *vsi)
}
/**
+ * ice_ena_vsi - resume a VSI
+ * @vsi: the VSI being resumed
+ * @locked: is the rtnl_lock already held
+ */
+int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
+{
+ int err = 0;
+
+ if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
+ return 0;
+
+ clear_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+ if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (netif_running(vsi->netdev)) {
+ if (!locked)
+ rtnl_lock();
+
+ err = ice_open(vsi->netdev);
+
+ if (!locked)
+ rtnl_unlock();
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ice_dis_vsi - pause a VSI
+ * @vsi: the VSI being paused
+ * @locked: is the rtnl_lock already held
+ */
+void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+{
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ set_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+ if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (netif_running(vsi->netdev)) {
+ if (!locked)
+ rtnl_lock();
+
+ ice_stop(vsi->netdev);
+
+ if (!locked)
+ rtnl_unlock();
+ } else {
+ ice_vsi_close(vsi);
+ }
+ }
+}
+
+/**
* ice_free_res - free a block of resources
* @res: pointer to the resource
* @index: starting index previously returned by ice_get_res
@@ -3064,6 +2384,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
vsi->base_vector = 0;
}
+ if (ice_is_xdp_ena_vsi(vsi))
+ /* the return value check can be skipped here; ice_destroy_xdp_rings()
+ * always returns 0 while a reset is in progress
+ */
+ ice_destroy_xdp_rings(vsi);
ice_vsi_put_qs(vsi);
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
@@ -3085,7 +2410,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret < 0)
goto err_vsi;
-
switch (vsi->type) {
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
@@ -3105,6 +2429,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
goto err_vectors;
ice_vsi_map_rings_to_vectors(vsi);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ vsi->num_xdp_txq = vsi->alloc_txq;
+ ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+ if (ret)
+ goto err_vectors;
+ }
/* Do not exit if configuring RSS had an issue, at least
* receive traffic on first queue. Hence no need to capture
* return value
@@ -3131,9 +2461,13 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
}
/* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ for (i = 0; i < vsi->tc_cfg.numtc; i++) {
max_txqs[i] = vsi->alloc_txq;
+ if (ice_is_xdp_ena_vsi(vsi))
+ max_txqs[i] += vsi->num_xdp_txq;
+ }
+
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
@@ -3166,6 +2500,7 @@ err_vsi:
bool ice_is_reset_in_progress(unsigned long *state)
{
return test_bit(__ICE_RESET_OICR_RECV, state) ||
+ test_bit(__ICE_DCBNL_DEVRESET, state) ||
test_bit(__ICE_PFR_REQ, state) ||
test_bit(__ICE_CORER_REQ, state) ||
test_bit(__ICE_GLOBR_REQ, state);
@@ -3271,6 +2606,51 @@ char *ice_nvm_version_str(struct ice_hw *hw)
}
/**
+ * ice_update_ring_stats - Update ring statistics
+ * @ring: ring to update
+ * @cont: used to increment per-vector counters
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ *
+ * This function assumes that the caller has acquired the u64_stats_sync lock.
+ */
+static void
+ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont,
+ u64 pkts, u64 bytes)
+{
+ ring->stats.bytes += bytes;
+ ring->stats.pkts += pkts;
+ cont->total_bytes += bytes;
+ cont->total_pkts += pkts;
+}
+
+/**
+ * ice_update_tx_ring_stats - Update Tx ring specific counters
+ * @tx_ring: ring to update
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
+{
+ u64_stats_update_begin(&tx_ring->syncp);
+ ice_update_ring_stats(tx_ring, &tx_ring->q_vector->tx, pkts, bytes);
+ u64_stats_update_end(&tx_ring->syncp);
+}
+
+/**
+ * ice_update_rx_ring_stats - Update Rx ring specific counters
+ * @rx_ring: ring to update
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
+{
+ u64_stats_update_begin(&rx_ring->syncp);
+ ice_update_ring_stats(rx_ring, &rx_ring->q_vector->rx, pkts, bytes);
+ u64_stats_update_end(&rx_ring->syncp);
+}
+
+/**
* ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
* @vsi: the VSI being configured MAC filter
* @macaddr: the MAC address to be added.
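The ring-stats helpers above take the writer side of a u64_stats_sync sequence. A consumer, for example an ndo_get_stats64 implementation, pairs them with the fetch/retry loop, roughly as in this hedged sketch (read_ring_stats_example is an illustrative name, not a function from this patch):

/* kernel-context sketch; relies on <linux/u64_stats_sync.h> */
static void read_ring_stats_example(struct ice_ring *ring,
                                    u64 *pkts, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&ring->syncp);
                *pkts = ring->stats.pkts;
                *bytes = ring->stats.bytes;
        } while (u64_stats_fetch_retry(&ring->syncp, start));
}

On 64-bit kernels the sequence counter compiles away entirely; on 32-bit kernels it keeps the two 64-bit counters from being read torn.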
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 47bc033fff20..e86aa60c0254 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -6,18 +6,7 @@
#include "ice.h"
-struct ice_txq_meta {
- /* Tx-scheduler element identifier */
- u32 q_teid;
- /* Entry in VSI's txq_map bitmap */
- u16 q_id;
- /* Relative index of Tx queue within TC */
- u16 q_handle;
- /* VSI index that Tx queue belongs to */
- u16 vsi_idx;
- /* TC number that Tx queue belongs to */
- u8 tc;
-};
+const char *ice_vsi_type_str(enum ice_vsi_type type);
int
ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
@@ -33,24 +22,6 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
-#ifdef CONFIG_PCI_IOV
-void
-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
-
-void
-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
-
-int
-ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta);
-
-void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta);
-
-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
-#endif /* CONFIG_PCI_IOV */
-
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
@@ -67,6 +38,10 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
+
+int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
+
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
@@ -89,6 +64,10 @@ int ice_vsi_release(struct ice_vsi *vsi);
void ice_vsi_close(struct ice_vsi *vsi);
+int ice_ena_vsi(struct ice_vsi *vsi, bool locked);
+
+void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
+
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
int
@@ -98,16 +77,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi);
bool ice_is_reset_in_progress(unsigned long *state);
-void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
-
-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
-
void ice_vsi_put_qs(struct ice_vsi *vsi);
-#ifdef CONFIG_DCB
-void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
-#endif /* CONFIG_DCB */
-
void ice_vsi_dis_irq(struct ice_vsi *vsi);
void ice_vsi_free_irq(struct ice_vsi *vsi);
@@ -118,6 +89,12 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+
+void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+
+void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
+
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
char *ice_nvm_version_str(struct ice_hw *hw);
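As a worked example of the ice_intrl_usec_to_reg() conversion declared here: with a 2 usec granularity, a 10 usec interrupt rate limit becomes 10 / 2 = 5, OR'd with the enable bit, while 0 leaves rate limiting disabled. A standalone sketch (the enable-bit position is an assumption for illustration, not taken from the register spec):

#include <stdint.h>
#include <stdio.h>

#define RATE_INTRL_ENA  (1u << 6)       /* assumed GLINT_RATE enable bit */

static uint32_t intrl_usec_to_reg(uint8_t intrl, uint8_t gran)
{
        uint32_t val = intrl / gran;

        return val ? (val | RATE_INTRL_ENA) : 0;
}

int main(void)
{
        printf("0x%02x\n", intrl_usec_to_reg(10, 2));   /* 0x45 */
        printf("0x%02x\n", intrl_usec_to_reg(0, 2));    /* 0x00 */
        return 0;
}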
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 214cd6eca405..5681e3be81f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -6,8 +6,10 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+#include "ice_dcb_nl.h"
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 8
@@ -435,42 +437,11 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
}
/**
- * ice_dis_vsi - pause a VSI
- * @vsi: the VSI being paused
- * @locked: is the rtnl_lock already held
- */
-static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
-{
- if (test_bit(__ICE_DOWN, vsi->state))
- return;
-
- set_bit(__ICE_NEEDS_RESTART, vsi->state);
-
- if (vsi->type == ICE_VSI_PF && vsi->netdev) {
- if (netif_running(vsi->netdev)) {
- if (!locked)
- rtnl_lock();
-
- ice_stop(vsi->netdev);
-
- if (!locked)
- rtnl_unlock();
- } else {
- ice_vsi_close(vsi);
- }
- }
-}
-
-/**
* ice_pf_dis_all_vsi - Pause all VSIs on a PF
* @pf: the PF
* @locked: is the rtnl_lock already held
*/
-#ifdef CONFIG_DCB
-void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
-#else
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
-#endif /* CONFIG_DCB */
{
int v;
@@ -636,8 +607,14 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi)
switch (vsi->port_info->phy.link_info.topo_media_conflict) {
case ICE_AQ_LINK_TOPO_CONFLICT:
case ICE_AQ_LINK_MEDIA_CONFLICT:
+ case ICE_AQ_LINK_TOPO_UNREACH_PRT:
+ case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
+ case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n");
break;
+ case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
+ netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+ break;
default:
break;
}
@@ -1661,6 +1638,324 @@ free_q_irqs:
}
/**
+ * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
+ * @vsi: VSI to setup Tx rings used by XDP
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ int i;
+
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ u16 xdp_q_idx = vsi->alloc_txq + i;
+ struct ice_ring *xdp_ring;
+
+ xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
+
+ if (!xdp_ring)
+ goto free_xdp_rings;
+
+ xdp_ring->q_index = xdp_q_idx;
+ xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
+ xdp_ring->ring_active = false;
+ xdp_ring->vsi = vsi;
+ xdp_ring->netdev = NULL;
+ xdp_ring->dev = dev;
+ xdp_ring->count = vsi->num_tx_desc;
+ vsi->xdp_rings[i] = xdp_ring;
+ if (ice_setup_tx_ring(xdp_ring))
+ goto free_xdp_rings;
+ ice_set_ring_xdp(xdp_ring);
+ xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ }
+
+ return 0;
+
+free_xdp_rings:
+ for (; i >= 0; i--)
+ if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
+ ice_free_tx_ring(vsi->xdp_rings[i]);
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
+ * @vsi: VSI to set the bpf prog on
+ * @prog: the bpf prog pointer
+ */
+static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
+{
+ struct bpf_prog *old_prog;
+ int i;
+
+ old_prog = xchg(&vsi->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ ice_for_each_rxq(vsi, i)
+ WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
+}
+
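The xchg()/WRITE_ONCE() pair above is the publish half of a lockless program swap; the Rx clean path is expected to sample the pointer once per pass. A hedged fragment of what that consumer side typically looks like (not code from this patch):

/* hedged consumer-side fragment: inside the Rx clean loop, under
 * rcu_read_lock(); rx_ring, xdp and act come from the surrounding code
 */
struct bpf_prog *xdp_prog = READ_ONCE(rx_ring->xdp_prog);

if (xdp_prog)
        act = bpf_prog_run_xdp(xdp_prog, &xdp);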
+/**
+ * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
+ * @vsi: VSI to bring up Tx rings used by XDP
+ * @prog: bpf program that will be assigned to VSI
+ *
+ * Return 0 on success and a negative value on error
+ */
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ int xdp_rings_rem = vsi->num_xdp_txq;
+ struct ice_pf *pf = vsi->back;
+ struct ice_qs_cfg xdp_qs_cfg = {
+ .qs_mutex = &pf->avail_q_mutex,
+ .pf_map = pf->avail_txqs,
+ .pf_map_size = pf->max_pf_txqs,
+ .q_count = vsi->num_xdp_txq,
+ .scatter_count = ICE_MAX_SCATTER_TXQS,
+ .vsi_map = vsi->txq_map,
+ .vsi_map_offset = vsi->alloc_txq,
+ .mapping_mode = ICE_VSI_MAP_CONTIG
+ };
+ enum ice_status status;
+ int i, v_idx;
+
+ vsi->xdp_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_xdp_txq,
+ sizeof(*vsi->xdp_rings), GFP_KERNEL);
+ if (!vsi->xdp_rings)
+ return -ENOMEM;
+
+ vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
+ if (__ice_vsi_get_qs(&xdp_qs_cfg))
+ goto err_map_xdp;
+
+ if (ice_xdp_alloc_setup_rings(vsi))
+ goto clear_xdp_rings;
+
+ /* follow the logic from ice_vsi_map_rings_to_vectors */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ int xdp_rings_per_v, q_id, q_base;
+
+ xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+ vsi->num_q_vectors - v_idx);
+ q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+ struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = xdp_ring;
+ }
+ xdp_rings_rem -= xdp_rings_per_v;
+ }
+
+ /* omit the scheduler update if in reset path; XDP queues will be
+ * taken into account at the end of ice_vsi_rebuild, where
+ * ice_cfg_vsi_lan is called
+ */
+ if (ice_is_reset_in_progress(pf->state))
+ return 0;
+
+ /* tell the Tx scheduler that we now have additional queues */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
+
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Failed VSI LAN queue config for XDP, error:%d\n",
+ status);
+ goto clear_xdp_rings;
+ }
+ ice_vsi_assign_bpf_prog(vsi, prog);
+
+ return 0;
+clear_xdp_rings:
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ if (vsi->xdp_rings[i]) {
+ kfree_rcu(vsi->xdp_rings[i], rcu);
+ vsi->xdp_rings[i] = NULL;
+ }
+
+err_map_xdp:
+ mutex_lock(&pf->avail_q_mutex);
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
+ vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
+ }
+ mutex_unlock(&pf->avail_q_mutex);
+
+ devm_kfree(&pf->pdev->dev, vsi->xdp_rings);
+ return -ENOMEM;
+}
+
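The DIV_ROUND_UP() loop above spreads the XDP rings across the remaining vectors as evenly as possible; for example, 8 rings over 3 vectors come out as 3, 3 and 2. A quick standalone check of that distribution:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int rem = 8, vectors = 3, v;

        for (v = 0; v < vectors; v++) {
                unsigned int per_v = DIV_ROUND_UP(rem, vectors - v);

                printf("vector %u gets %u rings\n", v, per_v);
                rem -= per_v;   /* prints 3, 3, 2 */
        }
        return 0;
}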
+/**
+ * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
+ * @vsi: VSI to remove XDP rings
+ *
+ * Detach XDP rings from irq vectors, clean up the PF bitmap and free
+ * resources
+ */
+int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_pf *pf = vsi->back;
+ int i, v_idx;
+
+ /* q_vectors are freed in reset path so there's no point in detaching
+ * rings; if the rebuild was not triggered by a reset, the reset bits
+ * in pf->state won't be set, so additionally check the first q_vector
+ * against NULL
+ */
+ if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ goto free_qmap;
+
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct ice_ring *ring;
+
+ ice_for_each_ring(ring, q_vector->tx)
+ if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+ break;
+
+ /* restore the value of the last node prior to XDP setup */
+ q_vector->tx.ring = ring;
+ }
+
+free_qmap:
+ mutex_lock(&pf->avail_q_mutex);
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
+ vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
+ }
+ mutex_unlock(&pf->avail_q_mutex);
+
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ if (vsi->xdp_rings[i]) {
+ if (vsi->xdp_rings[i]->desc)
+ ice_free_tx_ring(vsi->xdp_rings[i]);
+ kfree_rcu(vsi->xdp_rings[i], rcu);
+ vsi->xdp_rings[i] = NULL;
+ }
+
+ devm_kfree(&pf->pdev->dev, vsi->xdp_rings);
+ vsi->xdp_rings = NULL;
+
+ if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ return 0;
+
+ ice_vsi_assign_bpf_prog(vsi, NULL);
+
+ /* notify Tx scheduler that we destroyed XDP queues and bring
+ * back the old number of child nodes
+ */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq;
+
+ return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+}
+
+/**
+ * ice_xdp_setup_prog - Add or remove XDP eBPF program
+ * @vsi: VSI to setup XDP for
+ * @prog: XDP program
+ * @extack: netlink extended ack
+ */
+static int
+ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
+ bool if_running = netif_running(vsi->netdev);
+ int ret = 0, xdp_ring_err = 0;
+
+ if (frame_size > vsi->rx_buf_len) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
+ return -EOPNOTSUPP;
+ }
+
+ /* need to stop netdev while setting up the program for Rx rings */
+ if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
+ ret = ice_down(vsi);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Preparing device for XDP attach failed");
+ return ret;
+ }
+ }
+
+ if (!ice_is_xdp_ena_vsi(vsi) && prog) {
+ vsi->num_xdp_txq = vsi->alloc_txq;
+ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Setting up XDP Tx resources failed");
+ } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
+ xdp_ring_err = ice_destroy_xdp_rings(vsi);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Freeing XDP Tx resources failed");
+ } else {
+ ice_vsi_assign_bpf_prog(vsi, prog);
+ }
+
+ if (if_running)
+ ret = ice_up(vsi);
+
+ if (!ret && prog && vsi->xsk_umems) {
+ int i;
+
+ ice_for_each_rxq(vsi, i) {
+ struct ice_ring *rx_ring = vsi->rx_rings[i];
+
+ if (rx_ring->xsk_umem)
+ napi_schedule(&rx_ring->q_vector->napi);
+ }
+ }
+
+ return (ret || xdp_ring_err) ? -ENOMEM : 0;
+}
+
+/**
+ * ice_xdp - implements XDP handler
+ * @dev: netdevice
+ * @xdp: XDP command
+ */
+static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_vsi *vsi = np->vsi;
+
+ if (vsi->type != ICE_VSI_PF) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "XDP can be loaded only on PF VSI");
+ return -EINVAL;
+ }
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
+ return 0;
+ case XDP_SETUP_XSK_UMEM:
+ return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
+ xdp->xsk.queue_id);
+ default:
+ return -EINVAL;
+ }
+}
+
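For completeness, the object that XDP_SETUP_PROG hands to this callback is an ordinary BPF program. A minimal pass-through program, compiled with clang -O2 -target bpf and attached with ip link set dev <iface> xdpdrv obj <file> sec xdp, could look like this sketch:

#include <linux/bpf.h>

#define __section(NAME) __attribute__((section(NAME), used))

__section("xdp")
int xdp_pass_prog(struct xdp_md *ctx)
{
        return XDP_PASS;        /* hand every frame on to the stack */
}

char _license[] __section("license") = "GPL";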
+/**
* ice_ena_misc_vector - enable the non-queue interrupts
* @pf: board private structure
*/
@@ -2219,6 +2514,11 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
status = -ENODEV;
goto unroll_vsi_setup;
}
+ /* netdev has to be configured before setting frame size */
+ ice_vsi_cfg_frame_size(vsi);
+
+ /* Setup DCB netlink interface */
+ ice_dcbnl_setup(vsi);
/* registering the NAPI handler requires both the queues and
* netdev to be created, which are done in ice_pf_vsi_setup()
@@ -2300,6 +2600,7 @@ static void ice_deinit_pf(struct ice_pf *pf)
{
ice_service_task_stop(pf);
mutex_destroy(&pf->sw_mutex);
+ mutex_destroy(&pf->tc_mutex);
mutex_destroy(&pf->avail_q_mutex);
if (pf->avail_txqs) {
@@ -2349,6 +2650,7 @@ static int ice_init_pf(struct ice_pf *pf)
ice_set_pf_caps(pf);
mutex_init(&pf->sw_mutex);
+ mutex_init(&pf->tc_mutex);
/* setup service timer and periodic service task */
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
@@ -2598,7 +2900,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
break;
case ICE_ERR_AQ_ERROR:
- switch (hw->adminq.sq_last_status) {
+ switch (hw->pkg_dwnld_status) {
case ICE_AQ_RC_ENOSEC:
case ICE_AQ_RC_EBADSIG:
dev_err(dev,
@@ -2831,6 +3133,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
hw = &pf->hw;
hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
+ pci_save_state(pdev);
+
hw->back = pf;
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
@@ -2976,6 +3280,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_cfg_lldp_mib_change(&pf->hw, true);
}
+ /* print PCI link speed and width */
+ pcie_print_link_status(pf->pdev);
+
return 0;
err_alloc_sw_unroll:
@@ -3027,12 +3334,13 @@ static void ice_remove(struct pci_dev *pdev)
}
ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
- ice_clear_interrupt_scheme(pf);
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
* and the service task is already stopped.
*/
ice_reset(&pf->hw, ICE_RESET_PFR);
+ pci_wait_for_pending_transaction(pdev);
+ ice_clear_interrupt_scheme(pf);
pci_disable_pcie_error_reporting(pdev);
}
@@ -3347,6 +3655,48 @@ static void ice_set_rx_mode(struct net_device *netdev)
}
/**
+ * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
+ * @netdev: network interface device structure
+ * @queue_index: Queue ID
+ * @maxrate: maximum bandwidth in Mbps
+ */
+static int
+ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ enum ice_status status;
+ u16 q_handle;
+ u8 tc;
+
+ /* Validate that the requested maxrate is within the permitted range */
+ if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
+ netdev_err(netdev,
+ "Invalid max rate %d specified for the queue %d\n",
+ maxrate, queue_index);
+ return -EINVAL;
+ }
+
+ q_handle = vsi->tx_rings[queue_index]->q_handle;
+ tc = ice_dcb_get_tc(vsi, queue_index);
+
+ /* Restore the default BW when the user sets maxrate to 0 */
+ if (!maxrate)
+ status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
+ q_handle, ICE_MAX_BW);
+ else
+ status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
+ q_handle, ICE_MAX_BW, maxrate * 1000);
+ if (status) {
+ netdev_err(netdev,
+ "Unable to set Tx max rate, error %d\n", status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
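This NDO is driven by writes to the per-queue sysfs attribute /sys/class/net/<iface>/queues/tx-<n>/tx_maxrate, which carries Mbps; the Tx scheduler works in Kbps, hence the divide-by-1000 in the range check and the multiply-by-1000 on the way down. A standalone restatement of the bounds check (the 100 Gbps value for ICE_SCHED_MAX_BW is an assumption here):

#include <stdio.h>

#define SCHED_MAX_BW_KBPS       100000000u      /* assumed 100 Gbps cap */

static int maxrate_valid(unsigned int maxrate_mbps)
{
        /* 0 means "restore default"; otherwise stay under the cap */
        return !maxrate_mbps || maxrate_mbps <= SCHED_MAX_BW_KBPS / 1000;
}

int main(void)
{
        printf("%d\n", maxrate_valid(50000));   /* 1: 50 Gbps fits */
        printf("%d\n", maxrate_valid(200000));  /* 0: above the cap */
        return 0;
}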
+/**
* ice_fdb_add - add an entry to the hardware database
* @ndm: the input from the stack
* @tb: pointer to array of nladdr (unused)
@@ -3426,6 +3776,7 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
int ret = 0;
/* Don't set any netdev advanced features with device in Safe Mode */
@@ -3435,6 +3786,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
return ret;
}
+ /* Do not change setting during reset */
+ if (ice_is_reset_in_progress(pf->state)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
+ return -EBUSY;
+ }
+
/* Multiple features can be changed in one call so keep features in
* separate if/else statements to guarantee each feature is checked
*/
@@ -3505,6 +3863,8 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
ice_vsi_cfg_dcb_rings(vsi);
err = ice_vsi_cfg_lan_txqs(vsi);
+ if (!err && ice_is_xdp_ena_vsi(vsi))
+ err = ice_vsi_cfg_xdp_txqs(vsi);
if (!err)
err = ice_vsi_cfg_rxqs(vsi);
@@ -3920,6 +4280,13 @@ int ice_down(struct ice_vsi *vsi)
netdev_err(vsi->netdev,
"Failed stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
+ if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
+ tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
+ if (tx_err)
+ netdev_err(vsi->netdev,
+ "Failed stop XDP rings, VSI %d error %d\n",
+ vsi->vsi_num, tx_err);
+ }
rx_err = ice_vsi_stop_rx_rings(vsi);
if (rx_err)
@@ -3970,8 +4337,13 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
}
ice_for_each_txq(vsi, i) {
- vsi->tx_rings[i]->netdev = vsi->netdev;
- err = ice_setup_tx_ring(vsi->tx_rings[i]);
+ struct ice_ring *ring = vsi->tx_rings[i];
+
+ if (!ring)
+ return -EINVAL;
+
+ ring->netdev = vsi->netdev;
+ err = ice_setup_tx_ring(ring);
if (err)
break;
}
@@ -3996,8 +4368,13 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
ice_for_each_rxq(vsi, i) {
- vsi->rx_rings[i]->netdev = vsi->netdev;
- err = ice_setup_rx_ring(vsi->rx_rings[i]);
+ struct ice_ring *ring = vsi->rx_rings[i];
+
+ if (!ring)
+ return -EINVAL;
+
+ ring->netdev = vsi->netdev;
+ err = ice_setup_rx_ring(ring);
if (err)
break;
}
@@ -4089,54 +4466,6 @@ static void ice_vsi_release_all(struct ice_pf *pf)
}
/**
- * ice_ena_vsi - resume a VSI
- * @vsi: the VSI being resume
- * @locked: is the rtnl_lock already held
- */
-static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
-{
- int err = 0;
-
- if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
- return 0;
-
- clear_bit(__ICE_NEEDS_RESTART, vsi->state);
-
- if (vsi->netdev && vsi->type == ICE_VSI_PF) {
- if (netif_running(vsi->netdev)) {
- if (!locked)
- rtnl_lock();
-
- err = ice_open(vsi->netdev);
-
- if (!locked)
- rtnl_unlock();
- }
- }
-
- return err;
-}
-
-/**
- * ice_pf_ena_all_vsi - Resume all VSIs on a PF
- * @pf: the PF
- * @locked: is the rtnl_lock already held
- */
-#ifdef CONFIG_DCB
-int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
-{
- int v;
-
- ice_for_each_vsi(pf, v)
- if (pf->vsi[v])
- if (ice_ena_vsi(pf->vsi[v], locked))
- return -EIO;
-
- return 0;
-}
-#endif /* CONFIG_DCB */
-
-/**
* ice_vsi_rebuild_by_type - Rebuild VSI of a given type
* @pf: pointer to the PF instance
* @type: VSI type to rebuild
@@ -4158,8 +4487,8 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
err = ice_vsi_rebuild(vsi);
if (err) {
dev_err(&pf->pdev->dev,
- "rebuild VSI failed, err %d, VSI index %d, type %d\n",
- err, vsi->idx, type);
+ "rebuild VSI failed, err %d, VSI index %d, type %s\n",
+ err, vsi->idx, ice_vsi_type_str(type));
return err;
}
@@ -4167,8 +4496,8 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
status = ice_replay_vsi(&pf->hw, vsi->idx);
if (status) {
dev_err(&pf->pdev->dev,
- "replay VSI failed, status %d, VSI index %d, type %d\n",
- status, vsi->idx, type);
+ "replay VSI failed, status %d, VSI index %d, type %s\n",
+ status, vsi->idx, ice_vsi_type_str(type));
return -EIO;
}
@@ -4181,13 +4510,13 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
err = ice_ena_vsi(vsi, false);
if (err) {
dev_err(&pf->pdev->dev,
- "enable VSI failed, err %d, VSI index %d, type %d\n",
- err, vsi->idx, type);
+ "enable VSI failed, err %d, VSI index %d, type %s\n",
+ err, vsi->idx, ice_vsi_type_str(type));
return err;
}
- dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %d\n",
- vsi->idx, type);
+ dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %s\n",
+ vsi->idx, ice_vsi_type_str(type));
}
return 0;
@@ -4329,6 +4658,18 @@ clear_recovery:
}
/**
+ * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @vsi: Pointer to VSI structure
+ */
+static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
+{
+ if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+ return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
+ else
+ return ICE_RXBUF_3072;
+}
+
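
A standalone sketch of the MTU ceiling this helper implies, assuming (as in the mainline tree, though not shown in this patch) that ICE_ETH_PKT_HDR_PAD is ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN = 26 bytes and XDP_PACKET_HEADROOM is 256:

    /* not part of the patch: a user-space sketch of the XDP MTU limits */
    #include <stdio.h>

    int main(void)
    {
        const int pkt_hdr_pad = 14 + 4 + 8; /* assumed ICE_ETH_PKT_HDR_PAD */
        const int xdp_headroom = 256;       /* XDP_PACKET_HEADROOM */

        /* 4K pages, legacy-rx off: the full 3072-byte buffer is usable */
        printf("max XDP MTU, 3k buffers: %d\n", 3072 - pkt_hdr_pad);
        /* >= 8K pages or legacy-rx: headroom comes out of a 2048 buffer */
        printf("max XDP MTU, 2k buffers: %d\n",
               2048 - xdp_headroom - pkt_hdr_pad);
        return 0; /* prints 3046 and 1766 */
    }
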
+/**
* ice_change_mtu - NDO callback to change the MTU
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -4347,6 +4688,16 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ int frame_size = ice_max_xdp_frame_size(vsi);
+
+ if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
+ netdev_err(netdev, "max MTU for XDP usage is %d\n",
+ frame_size - ICE_ETH_PKT_HDR_PAD);
+ return -EINVAL;
+ }
+ }
+
if (new_mtu < netdev->min_mtu) {
netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
netdev->min_mtu);
@@ -4864,6 +5215,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
+ .ndo_set_tx_maxrate = ice_set_tx_maxrate,
.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
.ndo_set_vf_mac = ice_set_vf_mac,
.ndo_get_vf_config = ice_get_vf_cfg,
@@ -4878,4 +5230,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_fdb_add = ice_fdb_add,
.ndo_fdb_del = ice_fdb_del,
.ndo_tx_timeout = ice_tx_timeout,
+ .ndo_bpf = ice_xdp,
+ .ndo_xdp_xmit = ice_xdp_xmit,
+ .ndo_xsk_wakeup = ice_xsk_wakeup,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index bcb431f1bd92..57c73f613f32 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -219,8 +219,7 @@ static void ice_release_nvm(struct ice_hw *hw)
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
*/
-static enum ice_status
-ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
enum ice_status status;
@@ -242,9 +241,10 @@ ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
*/
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
+ u16 oem_hi, oem_lo, boot_cfg_tlv, boot_cfg_tlv_len;
struct ice_nvm_info *nvm = &hw->nvm;
u16 eetrack_lo, eetrack_hi;
- enum ice_status status = 0;
+ enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
@@ -261,15 +261,15 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
fla = rd32(hw, GLNVM_FLA);
if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
nvm->blank_nvm_mode = false;
- } else { /* Blank programming mode */
+ } else {
+ /* Blank programming mode */
nvm->blank_nvm_mode = true;
- status = ICE_ERR_NVM_BLANK_MODE;
ice_debug(hw, ICE_DBG_NVM,
"NVM init error: unsupported blank mode.\n");
- return status;
+ return ICE_ERR_NVM_BLANK_MODE;
}
- status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &hw->nvm.ver);
+ status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &nvm->ver);
if (status) {
ice_debug(hw, ICE_DBG_INIT,
"Failed to read DEV starter version.\n");
@@ -287,9 +287,42 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
return status;
}
- hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
- return status;
+ status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
+ ICE_SR_BOOT_CFG_PTR);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read Boot Configuration Block TLV.\n");
+ return status;
+ }
+
+ /* The Boot Configuration Block must be at least 2 words long
+ * (Combo Image Version High and Combo Image Version Low)
+ */
+ if (boot_cfg_tlv_len < 2) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Invalid Boot Configuration Block TLV size.\n");
+ return ICE_ERR_INVAL_SIZE;
+ }
+
+ status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF),
+ &oem_hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER hi.\n");
+ return status;
+ }
+
+ status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF + 1),
+ &oem_lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER lo.\n");
+ return status;
+ }
+
+ nvm->oem_ver = ((u32)oem_hi << 16) | oem_lo;
+
+ return 0;
}
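
The two Shadow RAM words read above are simply concatenated into nvm->oem_ver. As a hedged illustration (the field layout below follows the major.build.patch convention used elsewhere in Intel Ethernet drivers and is not defined in this patch), the combined word splits back out like so:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t oem_hi = 0x0102, oem_lo = 0x0304; /* sample SR words */
        uint32_t oem_ver = ((uint32_t)oem_hi << 16) | oem_lo;

        /* assumed layout: major [31:24], build [23:8], patch [7:0] */
        printf("%u.%u.%u\n", oem_ver >> 24,
               (oem_ver >> 8) & 0xffff, oem_ver & 0xff);
        return 0; /* prints 1.515.4 */
    }
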
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
new file mode 100644
index 000000000000..a9fa011c22c6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_NVM_H_
+#define _ICE_NVM_H_
+
+enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
+#endif /* _ICE_NVM_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index fc624b73d05d..84f609996ed5 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -411,6 +411,27 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
}
/**
+ * ice_aq_cfg_sched_elems - configures scheduler elements
+ * @hw: pointer to the HW struct
+ * @elems_req: number of elements to configure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_cfgd: returns total number of elements configured
+ * @cd: pointer to command details structure or NULL
+ *
+ * Configure scheduling elements (0x0403)
+ */
+static enum ice_status
+ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_conf_elem *buf, u16 buf_size,
+ u16 *elems_cfgd, struct ice_sq_cd *cd)
+{
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
+ elems_req, (void *)buf, buf_size,
+ elems_cfgd, cd);
+}
+
+/**
* ice_aq_suspend_sched_elems - suspend scheduler elements
* @hw: pointer to the HW struct
* @elems_req: number of elements to suspend
@@ -557,6 +578,149 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
}
/**
+ * ice_aq_rl_profile - performs a rate limiting task
+ * @hw: pointer to the HW struct
+ * @opcode: opcode for add, query, or remove profile(s)
+ * @num_profiles: the number of profiles
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @num_processed: number of processed add or remove profile(s) to return
+ * @cd: pointer to command details structure
+ *
+ * RL profile function to add, query, or remove profile(s)
+ */
+static enum ice_status
+ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
+ u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_rl_profile *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.rl_profile;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd->num_profiles = cpu_to_le16(num_profiles);
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && num_processed)
+ *num_processed = le16_to_cpu(cmd->num_processed);
+ return status;
+}
+
+/**
+ * ice_aq_add_rl_profile - adds rate limiting profile(s)
+ * @hw: pointer to the HW struct
+ * @num_profiles: the number of profile(s) to be added
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @num_profiles_added: total number of profiles added to return
+ * @cd: pointer to command details structure
+ *
+ * Add RL profile (0x0410)
+ */
+static enum ice_status
+ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
+ struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 buf_size, u16 *num_profiles_added,
+ struct ice_sq_cd *cd)
+{
+ return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles,
+ num_profiles, buf,
+ buf_size, num_profiles_added, cd);
+}
+
+/**
+ * ice_aq_remove_rl_profile - removes RL profile(s)
+ * @hw: pointer to the HW struct
+ * @num_profiles: the number of profile(s) to remove
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @num_profiles_removed: total number of profiles removed to return
+ * @cd: pointer to command details structure or NULL
+ *
+ * Remove RL profile (0x0415)
+ */
+static enum ice_status
+ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
+ struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 buf_size, u16 *num_profiles_removed,
+ struct ice_sq_cd *cd)
+{
+ return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
+ num_profiles, buf,
+ buf_size, num_profiles_removed, cd);
+}
+
+/**
+ * ice_sched_del_rl_profile - remove RL profile
+ * @hw: pointer to the HW struct
+ * @rl_info: rate limit profile information
+ *
+ * If the profile ID is not referenced anymore, it removes the profile ID
+ * with its associated parameters from the HW DB and locally. The caller
+ * needs to hold the scheduler lock.
+ */
+static enum ice_status
+ice_sched_del_rl_profile(struct ice_hw *hw,
+ struct ice_aqc_rl_profile_info *rl_info)
+{
+ struct ice_aqc_rl_profile_generic_elem *buf;
+ u16 num_profiles_removed;
+ enum ice_status status;
+ u16 num_profiles = 1;
+
+ if (rl_info->prof_id_ref != 0)
+ return ICE_ERR_IN_USE;
+
+ /* Safe to remove profile ID */
+ buf = (struct ice_aqc_rl_profile_generic_elem *)
+ &rl_info->profile;
+ status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
+ &num_profiles_removed, NULL);
+ if (status || num_profiles_removed != num_profiles)
+ return ICE_ERR_CFG;
+
+ /* Delete stale entry now */
+ list_del(&rl_info->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), rl_info);
+ return status;
+}
+
+/**
+ * ice_sched_clear_rl_prof - clears RL prof entries
+ * @pi: port information structure
+ *
+ * This function removes all RL profiles from the HW as well as from the SW DB.
+ */
+static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
+{
+ u16 ln;
+
+ for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ struct ice_aqc_rl_profile_info *rl_prof_tmp;
+
+ list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
+ &pi->rl_prof_list[ln], list_entry) {
+ struct ice_hw *hw = pi->hw;
+ enum ice_status status;
+
+ rl_prof_elem->prof_id_ref = 0;
+ status = ice_sched_del_rl_profile(hw, rl_prof_elem);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "Remove rl profile failed\n");
+ /* On error, free the memory anyway */
+ list_del(&rl_prof_elem->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
+ }
+ }
+ }
+}
+
+/**
* ice_sched_clear_agg - clears the aggregator related information
* @hw: pointer to the hardware structure
*
@@ -592,6 +756,8 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
if (!pi)
return;
+ /* remove RL profiles related lists */
+ ice_sched_clear_rl_prof(pi);
if (pi->root) {
ice_free_sched_node(pi, pi->root);
pi->root = NULL;
@@ -1014,6 +1180,8 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
/* initialize the port for handling the scheduler tree */
pi->port_state = ICE_SCHED_PORT_STATE_READY;
mutex_init(&pi->sched_lock);
+ for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
+ INIT_LIST_HEAD(&pi->rl_prof_list[i]);
err_init_port:
if (status && pi->root) {
@@ -1036,7 +1204,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
struct ice_aqc_query_txsched_res_resp *buf;
enum ice_status status = 0;
__le16 max_sibl;
- u8 i;
+ u16 i;
if (hw->layer_info)
return status;
@@ -1062,8 +1230,8 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
* and so on. This array will be populated from root (index 0) to
* qgroup layer 7. Leaf node has no children.
*/
- for (i = 0; i < hw->num_tx_sched_layers; i++) {
- max_sibl = buf->layer_props[i].max_sibl_grp_sz;
+ for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
+ max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
hw->max_children[i] = le16_to_cpu(max_sibl);
}
@@ -1670,3 +1838,1095 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}
+
+/**
+ * ice_sched_rm_unused_rl_prof - remove unused RL profile
+ * @pi: port information structure
+ *
+ * This function removes unused rate limit profiles from the HW and
+ * SW DB. The caller needs to hold scheduler lock.
+ */
+static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
+{
+ u16 ln;
+
+ for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ struct ice_aqc_rl_profile_info *rl_prof_tmp;
+
+ list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
+ &pi->rl_prof_list[ln], list_entry) {
+ if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
+ ice_debug(pi->hw, ICE_DBG_SCHED,
+ "Removed rl profile\n");
+ }
+ }
+}
+
+/**
+ * ice_sched_update_elem - update element
+ * @hw: pointer to the HW struct
+ * @node: pointer to node
+ * @info: node info to update
+ *
+ * It updates the HW DB and the local SW DB of the node. It updates the
+ * node's scheduling parameters from the info data buffer (info->data) and
+ * returns success, or an error if configuring the sched element fails. The
+ * caller needs to hold the scheduler lock.
+ */
+static enum ice_status
+ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
+ struct ice_aqc_txsched_elem_data *info)
+{
+ struct ice_aqc_conf_elem buf;
+ enum ice_status status;
+ u16 elem_cfgd = 0;
+ u16 num_elems = 1;
+
+ buf.generic[0] = *info;
+ /* Parent TEID is reserved field in this aq call */
+ buf.generic[0].parent_teid = 0;
+ /* Element type is reserved field in this aq call */
+ buf.generic[0].data.elem_type = 0;
+ /* Flags is reserved field in this aq call */
+ buf.generic[0].data.flags = 0;
+
+ /* Update HW DB */
+ /* Configure element node */
+ status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
+ &elem_cfgd, NULL);
+ if (status || elem_cfgd != num_elems) {
+ ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* Config success case */
+ /* Now update local SW DB */
+ /* Only copy the data portion of info buffer */
+ node->info.data = info->data;
+ return status;
+}
+
+/**
+ * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @rl_type: rate limit type CIR, EIR, or shared
+ * @bw_alloc: BW weight/allocation
+ *
+ * This function configures node element's BW allocation.
+ */
+static enum ice_status
+ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u8 bw_alloc)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+ enum ice_status status;
+
+ buf = node->info;
+ data = &buf.data;
+ if (rl_type == ICE_MIN_BW) {
+ data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
+ data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc);
+ } else if (rl_type == ICE_MAX_BW) {
+ data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
+ data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
+ } else {
+ return ICE_ERR_PARAM;
+ }
+
+ /* Configure element */
+ status = ice_sched_update_elem(hw, node, &buf);
+ return status;
+}
+
+/**
+ * ice_set_clear_cir_bw - set or clear CIR BW
+ * @bw_t_info: bandwidth type information structure
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
+ */
+static void
+ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+{
+ if (bw == ICE_SCHED_DFLT_BW) {
+ clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->cir_bw.bw = 0;
+ } else {
+ /* Save type of BW information */
+ set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->cir_bw.bw = bw;
+ }
+}
+
+/**
+ * ice_set_clear_eir_bw - set or clear EIR BW
+ * @bw_t_info: bandwidth type information structure
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
+ */
+static void
+ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+{
+ if (bw == ICE_SCHED_DFLT_BW) {
+ clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->eir_bw.bw = 0;
+ } else {
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element.
+ * First clear earlier saved shared BW information.
+ */
+ clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
+ bw_t_info->shared_bw = 0;
+ /* save EIR BW information */
+ set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->eir_bw.bw = bw;
+ }
+}
+
+/**
+ * ice_set_clear_shared_bw - set or clear shared BW
+ * @bw_t_info: bandwidth type information structure
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
+ */
+static void
+ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+{
+ if (bw == ICE_SCHED_DFLT_BW) {
+ clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
+ bw_t_info->shared_bw = 0;
+ } else {
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element.
+ * First clear earlier saved EIR BW information.
+ */
+ clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->eir_bw.bw = 0;
+ /* save shared BW information */
+ set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
+ bw_t_info->shared_bw = bw;
+ }
+}
+
+/**
+ * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
+ * @bw: bandwidth in Kbps
+ *
+ * This function calculates the wakeup parameter of RL profile.
+ */
+static u16 ice_sched_calc_wakeup(s32 bw)
+{
+ s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
+ s32 wakeup_f_int;
+ u16 wakeup = 0;
+
+ /* Get the wakeup integer value */
+ bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
+ wakeup_int = div64_long(ICE_RL_PROF_FREQUENCY, bytes_per_sec);
+ if (wakeup_int > 63) {
+ wakeup = (u16)((1 << 15) | wakeup_int);
+ } else {
+ /* Calculate fraction value up to 4 decimals
+ * Convert Integer value to a constant multiplier
+ */
+ wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
+ wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
+ ICE_RL_PROF_FREQUENCY,
+ bytes_per_sec);
+
+ /* Get Fraction value */
+ wakeup_f = wakeup_a - wakeup_b;
+
+ /* Round up the Fractional value via Ceil(Fractional value) */
+ if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2))
+ wakeup_f += 1;
+
+ wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION,
+ ICE_RL_PROF_MULTIPLIER);
+ wakeup |= (u16)(wakeup_int << 9);
+ wakeup |= (u16)(0x1ff & wakeup_f_int);
+ }
+
+ return wakeup;
+}
+
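
To make the fixed-point arithmetic above concrete, here is a user-space sketch of the same computation (plain 64-bit division standing in for div64_long), with one worked rate; the constants mirror the ICE_RL_PROF_* values added to ice_sched.h by this patch:

    #include <stdio.h>
    #include <stdint.h>

    #define FREQ     446000000LL /* ICE_RL_PROF_FREQUENCY */
    #define MULT     10000LL     /* ICE_RL_PROF_MULTIPLIER */
    #define FRACTION 512LL       /* ICE_RL_PROF_FRACTION */

    static uint16_t calc_wakeup(int64_t bw_kbps)
    {
        int64_t bps = bw_kbps * 1000 / 8; /* bytes per second */
        int64_t wi = FREQ / bps;          /* integer part */
        int64_t wf;

        if (wi > 63)
            return (uint16_t)((1 << 15) | wi);
        wf = MULT * FREQ / bps - MULT * wi; /* fraction scaled by 10000 */
        if (wf > MULT / 2)
            wf += 1;                        /* round up */
        return (uint16_t)((wi << 9) | (0x1ff & (wf * FRACTION / MULT)));
    }

    int main(void)
    {
        /* 100 Mbit/s: wi = 35, wf = 6801, encodes to 0x475c */
        printf("0x%04x\n", calc_wakeup(100000));
        return 0;
    }
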
+/**
+ * ice_sched_bw_to_rl_profile - convert BW to profile parameters
+ * @bw: bandwidth in Kbps
+ * @profile: profile parameters to return
+ *
+ * This function converts the BW to profile structure format.
+ */
+static enum ice_status
+ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ s64 bytes_per_sec, ts_rate, mv_tmp;
+ bool found = false;
+ s32 encode = 0;
+ s64 mv = 0;
+ s32 i;
+
+ /* BW settings range from 0.5 Mb/sec to 100 Gb/sec */
+ if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
+ return status;
+
+ /* Bytes per second from Kbps */
+ bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
+
+ /* encode is 6 bits, but only 5 bits are really useful */
+ for (i = 0; i < 64; i++) {
+ u64 pow_result = BIT_ULL(i);
+
+ ts_rate = div64_long((s64)ICE_RL_PROF_FREQUENCY,
+ pow_result * ICE_RL_PROF_TS_MULTIPLIER);
+ if (ts_rate <= 0)
+ continue;
+
+ /* Multiplier value */
+ mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
+ ts_rate);
+
+ /* Round to the nearest ICE_RL_PROF_MULTIPLIER */
+ mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
+
+ /* First multiplier value greater than the given
+ * accuracy bytes
+ */
+ if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
+ encode = i;
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ u16 wm;
+
+ wm = ice_sched_calc_wakeup(bw);
+ profile->rl_multiply = cpu_to_le16(mv);
+ profile->wake_up_calc = cpu_to_le16(wm);
+ profile->rl_encode = cpu_to_le16(encode);
+ status = 0;
+ } else {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ }
+
+ return status;
+}
+
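
Likewise, a worked instance of the encode search above. It assumes round_up_64bit(a, b) divides a by b rounding to the nearest integer, which is how the driver defines that helper; for 100000 Kbps the loop settles on encode 8 with multiplier 230:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const int64_t freq = 446000000, mult = 10000, ts_mult = 32;
        const int64_t accuracy = 128;      /* ICE_RL_PROF_ACCURACY_BYTES */
        int64_t bps = 100000LL * 1000 / 8; /* 12500000 bytes/sec */
        int i;

        /* the driver scans up to 64 exponents; 16 is plenty here */
        for (i = 0; i < 16; i++) {
            int64_t ts_rate = freq / ((1LL << i) * ts_mult);
            int64_t mv;

            if (ts_rate <= 0)
                continue;
            mv = (bps * mult / ts_rate + mult / 2) / mult;
            if (mv > accuracy) {
                /* prints "encode 8 multiplier 230" */
                printf("encode %d multiplier %lld\n", i, (long long)mv);
                break;
            }
        }
        return 0;
    }
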
+/**
+ * ice_sched_add_rl_profile - add RL profile
+ * @pi: port information structure
+ * @rl_type: type of rate limit BW - min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ * @layer_num: specifies in which layer to create profile
+ *
+ * This function first checks the existing list for a corresponding BW
+ * parameter. If it exists, it returns the associated profile; otherwise
+ * it creates a new rate limit profile for the requested BW, adds it to
+ * the HW DB and the local list, and returns the new profile, or NULL on
+ * error. The caller needs to hold the scheduler lock.
+ */
+static struct ice_aqc_rl_profile_info *
+ice_sched_add_rl_profile(struct ice_port_info *pi,
+ enum ice_rl_type rl_type, u32 bw, u8 layer_num)
+{
+ struct ice_aqc_rl_profile_generic_elem *buf;
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ u16 profiles_added = 0, num_profiles = 1;
+ enum ice_status status;
+ struct ice_hw *hw;
+ u8 profile_type;
+
+ if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ return NULL;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
+ break;
+ case ICE_MAX_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
+ break;
+ case ICE_SHARED_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
+ break;
+ default:
+ return NULL;
+ }
+
+ if (!pi)
+ return NULL;
+ hw = pi->hw;
+ list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
+ list_entry)
+ if (rl_prof_elem->profile.flags == profile_type &&
+ rl_prof_elem->bw == bw)
+ /* Return existing profile ID info */
+ return rl_prof_elem;
+
+ /* Create new profile ID */
+ rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem),
+ GFP_KERNEL);
+
+ if (!rl_prof_elem)
+ return NULL;
+
+ status = ice_sched_bw_to_rl_profile(bw, &rl_prof_elem->profile);
+ if (status)
+ goto exit_add_rl_prof;
+
+ rl_prof_elem->bw = bw;
+ /* layer_num is zero-based, and FW expects levels from 1 to 9 */
+ rl_prof_elem->profile.level = layer_num + 1;
+ rl_prof_elem->profile.flags = profile_type;
+ rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);
+
+ /* Create new entry in HW DB */
+ buf = (struct ice_aqc_rl_profile_generic_elem *)
+ &rl_prof_elem->profile;
+ status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
+ &profiles_added, NULL);
+ if (status || profiles_added != num_profiles)
+ goto exit_add_rl_prof;
+
+ /* Good entry - add in the list */
+ rl_prof_elem->prof_id_ref = 0;
+ list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
+ return rl_prof_elem;
+
+exit_add_rl_prof:
+ devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
+ return NULL;
+}
+
+/**
+ * ice_sched_cfg_node_bw_lmt - configure node sched params
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @rl_type: rate limit type CIR, EIR, or shared
+ * @rl_prof_id: rate limit profile ID
+ *
+ * This function configures node element's BW limit.
+ */
+static enum ice_status
+ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u16 rl_prof_id)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+
+ buf = node->info;
+ data = &buf.data;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
+ data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
+ break;
+ case ICE_MAX_BW:
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element
+ */
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
+ return ICE_ERR_CFG;
+ data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
+ data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
+ break;
+ case ICE_SHARED_BW:
+ /* Check for removing shared BW */
+ if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
+ /* remove shared profile */
+ data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
+ data->srl_id = 0; /* clear SRL field */
+
+ /* enable back EIR to default profile */
+ data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
+ data->eir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ break;
+ }
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element
+ */
+ if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
+ (le16_to_cpu(data->eir_bw.bw_profile_idx) !=
+ ICE_SCHED_DFLT_RL_PROF_ID))
+ return ICE_ERR_CFG;
+ /* EIR BW is set to default, disable it */
+ data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
+ /* Okay to enable shared BW now */
+ data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
+ data->srl_id = cpu_to_le16(rl_prof_id);
+ break;
+ default:
+ /* Unknown rate limit type */
+ return ICE_ERR_PARAM;
+ }
+
+ /* Configure element */
+ return ice_sched_update_elem(hw, node, &buf);
+}
+
+/**
+ * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
+ * @node: sched node
+ * @rl_type: rate limit type
+ *
+ * If an existing profile matches, it returns the corresponding rate
+ * limit profile ID; otherwise it returns an invalid ID as an error.
+ */
+static u16
+ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
+ enum ice_rl_type rl_type)
+{
+ u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
+ struct ice_aqc_txsched_elem *data;
+
+ data = &node->info.data;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
+ rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx);
+ break;
+ case ICE_MAX_BW:
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
+ rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx);
+ break;
+ case ICE_SHARED_BW:
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
+ rl_prof_id = le16_to_cpu(data->srl_id);
+ break;
+ default:
+ break;
+ }
+
+ return rl_prof_id;
+}
+
+/**
+ * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
+ * @pi: port information structure
+ * @rl_type: type of rate limit BW - min, max, or shared
+ * @layer_index: layer index
+ *
+ * This function returns the requested profile creation layer.
+ */
+static u8
+ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
+ u8 layer_index)
+{
+ struct ice_hw *hw = pi->hw;
+
+ if (layer_index >= hw->num_tx_sched_layers)
+ return ICE_SCHED_INVAL_LAYER_NUM;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ if (hw->layer_info[layer_index].max_cir_rl_profiles)
+ return layer_index;
+ break;
+ case ICE_MAX_BW:
+ if (hw->layer_info[layer_index].max_eir_rl_profiles)
+ return layer_index;
+ break;
+ case ICE_SHARED_BW:
+ /* if current layer doesn't support SRL profile creation
+ * then try a layer up or down.
+ */
+ if (hw->layer_info[layer_index].max_srl_profiles)
+ return layer_index;
+ else if (layer_index < hw->num_tx_sched_layers - 1 &&
+ hw->layer_info[layer_index + 1].max_srl_profiles)
+ return layer_index + 1;
+ else if (layer_index > 0 &&
+ hw->layer_info[layer_index - 1].max_srl_profiles)
+ return layer_index - 1;
+ break;
+ default:
+ break;
+ }
+ return ICE_SCHED_INVAL_LAYER_NUM;
+}
+
+/**
+ * ice_sched_get_srl_node - get shared rate limit node
+ * @node: tree node
+ * @srl_layer: shared rate limit layer
+ *
+ * This function returns SRL node to be used for shared rate limit purpose.
+ * The caller needs to hold scheduler lock.
+ */
+static struct ice_sched_node *
+ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
+{
+ if (srl_layer > node->tx_sched_layer)
+ return node->children[0];
+ else if (srl_layer < node->tx_sched_layer)
+ /* A node can't be created without a parent. It will always
+ * have a valid parent except for the root node.
+ */
+ return node->parent;
+ else
+ return node;
+}
+
+/**
+ * ice_sched_rm_rl_profile - remove RL profile ID
+ * @pi: port information structure
+ * @layer_num: layer number where profiles are saved
+ * @profile_type: profile type like EIR, CIR, or SRL
+ * @profile_id: profile ID to remove
+ *
+ * This function removes the rate limit profile of type 'profile_type' with
+ * ID 'profile_id' from layer 'layer_num'. The caller needs to hold the
+ * scheduler lock.
+ */
+static enum ice_status
+ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
+ u16 profile_id)
+{
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ enum ice_status status = 0;
+
+ if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ return ICE_ERR_PARAM;
+ /* Check the existing list for RL profile */
+ list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
+ list_entry)
+ if (rl_prof_elem->profile.flags == profile_type &&
+ le16_to_cpu(rl_prof_elem->profile.profile_id) ==
+ profile_id) {
+ if (rl_prof_elem->prof_id_ref)
+ rl_prof_elem->prof_id_ref--;
+
+ /* Remove old profile ID from database */
+ status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
+ if (status && status != ICE_ERR_IN_USE)
+ ice_debug(pi->hw, ICE_DBG_SCHED,
+ "Remove rl profile failed\n");
+ break;
+ }
+ if (status == ICE_ERR_IN_USE)
+ status = 0;
+ return status;
+}
+
+/**
+ * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
+ * @pi: port information structure
+ * @node: pointer to node structure
+ * @rl_type: rate limit type min, max, or shared
+ * @layer_num: layer number where RL profiles are saved
+ *
+ * This function configures node element's BW rate limit profile ID of
+ * type CIR, EIR, or SRL to default. This function needs to be called
+ * with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
+ struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u8 layer_num)
+{
+ enum ice_status status;
+ struct ice_hw *hw;
+ u8 profile_type;
+ u16 rl_prof_id;
+ u16 old_id;
+
+ hw = pi->hw;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
+ rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
+ break;
+ case ICE_MAX_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
+ rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
+ break;
+ case ICE_SHARED_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
+ /* No SRL is configured for default case */
+ rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ /* Save existing RL prof ID for later clean up */
+ old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
+ /* Configure BW scheduling parameters */
+ status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
+ if (status)
+ return status;
+
+ /* Remove stale RL profile ID */
+ if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
+ old_id == ICE_SCHED_INVAL_PROF_ID)
+ return 0;
+
+ return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
+}
+
+/**
+ * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
+ * @pi: port information structure
+ * @node: pointer to node structure
+ * @layer_num: layer number where rate limit profiles are saved
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth value
+ *
+ * This function prepares the node element's bandwidth for SRL or EIR
+ * exclusively. EIR BW and Shared BW profiles are mutually exclusive, and
+ * hence only one of them may be set for any given element. This function
+ * needs to be called with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
+ struct ice_sched_node *node,
+ u8 layer_num, enum ice_rl_type rl_type, u32 bw)
+{
+ if (rl_type == ICE_SHARED_BW) {
+ /* An SRL node is passed in this case; it may be a different node */
+ if (bw == ICE_SCHED_DFLT_BW)
+ /* SRL being removed, ice_sched_cfg_node_bw_lmt()
+ * enables EIR to default. EIR is not set in this
+ * case, so no additional action is required.
+ */
+ return 0;
+
+ /* SRL being configured, set EIR to default here.
+ * ice_sched_cfg_node_bw_lmt() disables EIR when it
+ * configures SRL
+ */
+ return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
+ layer_num);
+ } else if (rl_type == ICE_MAX_BW &&
+ node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
+ /* Remove Shared profile. Set default shared BW call
+ * removes shared profile for a node.
+ */
+ return ice_sched_set_node_bw_dflt(pi, node,
+ ICE_SHARED_BW,
+ layer_num);
+ }
+ return 0;
+}
+
+/**
+ * ice_sched_set_node_bw - set node's bandwidth
+ * @pi: port information structure
+ * @node: tree node
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ * @layer_num: layer number
+ *
+ * This function adds new profile corresponding to requested BW, configures
+ * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
+ * ID from local database. The caller needs to hold scheduler lock.
+ */
+static enum ice_status
+ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u32 bw, u8 layer_num)
+{
+ struct ice_aqc_rl_profile_info *rl_prof_info;
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_hw *hw = pi->hw;
+ u16 old_id, rl_prof_id;
+
+ rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
+ if (!rl_prof_info)
+ return status;
+
+ rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id);
+
+ /* Save existing RL prof ID for later clean up */
+ old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
+ /* Configure BW scheduling parameters */
+ status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
+ if (status)
+ return status;
+
+ /* New changes have been applied */
+ /* Increment the profile ID reference count */
+ rl_prof_info->prof_id_ref++;
+
+ /* Check for old ID removal */
+ if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
+ old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
+ return 0;
+
+ return ice_sched_rm_rl_profile(pi, layer_num,
+ rl_prof_info->profile.flags,
+ old_id);
+}
+
+/**
+ * ice_sched_set_node_bw_lmt - set node's BW limit
+ * @pi: port information structure
+ * @node: tree node
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * It updates node's BW limit parameters like BW RL profile ID of type CIR,
+ * EIR, or SRL. The caller needs to hold scheduler lock.
+ */
+static enum ice_status
+ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ struct ice_sched_node *cfg_node = node;
+ enum ice_status status;
+ struct ice_hw *hw;
+ u8 layer_num;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ hw = pi->hw;
+ /* Remove unused RL profile IDs from HW and SW DB */
+ ice_sched_rm_unused_rl_prof(pi);
+ layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
+ node->tx_sched_layer);
+ if (layer_num >= hw->num_tx_sched_layers)
+ return ICE_ERR_PARAM;
+
+ if (rl_type == ICE_SHARED_BW) {
+ /* SRL node may be different */
+ cfg_node = ice_sched_get_srl_node(node, layer_num);
+ if (!cfg_node)
+ return ICE_ERR_CFG;
+ }
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element
+ */
+ status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
+ bw);
+ if (status)
+ return status;
+ if (bw == ICE_SCHED_DFLT_BW)
+ return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
+ layer_num);
+ return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
+}
+
+/**
+ * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
+ * @pi: port information structure
+ * @node: pointer to node structure
+ * @rl_type: rate limit type min, max, or shared
+ *
+ * This function configures node element's BW rate limit profile ID of
+ * type CIR, EIR, or SRL to default. This function needs to be called
+ * with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
+ struct ice_sched_node *node,
+ enum ice_rl_type rl_type)
+{
+ return ice_sched_set_node_bw_lmt(pi, node, rl_type,
+ ICE_SCHED_DFLT_BW);
+}
+
+/**
+ * ice_sched_validate_srl_node - Check node for SRL applicability
+ * @node: sched node to configure
+ * @sel_layer: selected SRL layer
+ *
+ * This function checks if the SRL can be applied to a selected layer node on
+ * behalf of the requested node (first argument). This function needs to be
+ * called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
+{
+ /* SRL profiles are not available on all layers. Check if the
+ * SRL profile can be applied to a node above or below the
+ * requested node. SRL configuration is possible only if the
+ * selected layer's node has single child.
+ */
+ if (sel_layer == node->tx_sched_layer ||
+ ((sel_layer == node->tx_sched_layer + 1) &&
+ node->num_children == 1) ||
+ ((sel_layer == node->tx_sched_layer - 1) &&
+ (node->parent && node->parent->num_children == 1)))
+ return 0;
+
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_sched_save_q_bw - save queue node's BW information
+ * @q_ctx: queue context structure
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save BW information of queue type node for post replay use.
+ */
+static enum ice_status
+ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
+{
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
+ break;
+ case ICE_MAX_BW:
+ ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
+ break;
+ case ICE_SHARED_BW:
+ ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ return 0;
+}
+
+/**
+ * ice_sched_set_q_bw_lmt - sets queue BW limit
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @q_handle: software queue handle
+ * @rl_type: min, max, or shared
+ * @bw: bandwidth in Kbps
+ *
+ * This function sets BW limit of queue scheduling node.
+ */
+static enum ice_status
+ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type, u32 bw)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_sched_node *node;
+ struct ice_q_ctx *q_ctx;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ mutex_lock(&pi->sched_lock);
+ q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
+ if (!q_ctx)
+ goto exit_q_bw_lmt;
+ node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
+ if (!node) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
+ goto exit_q_bw_lmt;
+ }
+
+ /* Return error if it is not a leaf node */
+ if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
+ goto exit_q_bw_lmt;
+
+ /* SRL bandwidth layer selection */
+ if (rl_type == ICE_SHARED_BW) {
+ u8 sel_layer; /* selected layer */
+
+ sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
+ node->tx_sched_layer);
+ if (sel_layer >= pi->hw->num_tx_sched_layers) {
+ status = ICE_ERR_PARAM;
+ goto exit_q_bw_lmt;
+ }
+ status = ice_sched_validate_srl_node(node, sel_layer);
+ if (status)
+ goto exit_q_bw_lmt;
+ }
+
+ if (bw == ICE_SCHED_DFLT_BW)
+ status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
+ else
+ status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
+
+ if (!status)
+ status = ice_sched_save_q_bw(q_ctx, rl_type, bw);
+
+exit_q_bw_lmt:
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_q_bw_lmt - configure queue BW limit
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @q_handle: software queue handle
+ * @rl_type: min, max, or shared
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures BW limit of queue scheduling node.
+ */
+enum ice_status
+ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type, u32 bw)
+{
+ return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
+ bw);
+}
+
+/**
+ * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @q_handle: software queue handle
+ * @rl_type: min, max, or shared
+ *
+ * This function configures BW default limit of queue scheduling node.
+ */
+enum ice_status
+ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type)
+{
+ return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
+ ICE_SCHED_DFLT_BW);
+}
+
+/**
+ * ice_cfg_rl_burst_size - Set burst size value
+ * @hw: pointer to the HW struct
+ * @bytes: burst size in bytes
+ *
+ * This function configures/sets the burst size to the requested new value.
+ * The new burst size value is used for future rate limit calls. It doesn't
+ * change the existing or previously created RL profiles.
+ */
+enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
+{
+ u16 burst_size_to_prog;
+
+ if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
+ bytes > ICE_MAX_BURST_SIZE_ALLOWED)
+ return ICE_ERR_PARAM;
+ if (ice_round_to_num(bytes, 64) <=
+ ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
+ /* 64 byte granularity case */
+ /* Disable MSB granularity bit */
+ burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
+ /* round number to nearest 64 byte granularity */
+ bytes = ice_round_to_num(bytes, 64);
+ /* The value is in 64 byte chunks */
+ burst_size_to_prog |= (u16)(bytes / 64);
+ } else {
+ /* k bytes granularity case */
+ /* Enable MSB granularity bit */
+ burst_size_to_prog = ICE_KBYTE_GRANULARITY;
+ /* round number to nearest 1024 granularity */
+ bytes = ice_round_to_num(bytes, 1024);
+ /* check rounding doesn't go beyond allowed */
+ if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
+ bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
+ /* The value is in k bytes */
+ burst_size_to_prog |= (u16)(bytes / 1024);
+ }
+ hw->max_burst_size = burst_size_to_prog;
+ return 0;
+}
+
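
A worked instance of the encoding above (again user-space; ice_round_to_num() is assumed to round to the nearest multiple, per the driver's helper): 4096 bytes fits the 64-byte granularity and programs 0x0040, while 1000000 bytes falls back to 1K granularity, rounds to 977 KB, and programs 0x0bd1 with the MSB set:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t round_to_num(uint32_t n, uint32_t r)
    {
        return (n % r) < (r / 2) ? (n / r) * r : ((n + r - 1) / r) * r;
    }

    int main(void)
    {
        uint32_t bytes, prog;

        bytes = round_to_num(4096, 64);
        prog = bytes / 64;                  /* bit 11 clear: 64 B chunks */
        printf("0x%04x\n", prog);           /* 0x0040 */

        bytes = round_to_num(1000000, 1024);
        prog = (1u << 11) | (bytes / 1024); /* bit 11 set: 1 KB chunks */
        printf("0x%04x\n", prog);           /* 0x0bd1 */
        return 0;
    }
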
+/**
+ * ice_sched_replay_node_prio - re-configure node priority
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @priority: priority value
+ *
+ * This function configures node element's priority value. It
+ * needs to be called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
+ u8 priority)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+ enum ice_status status;
+
+ buf = node->info;
+ data = &buf.data;
+ data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
+ data->generic = priority;
+
+ /* Configure element */
+ status = ice_sched_update_elem(hw, node, &buf);
+ return status;
+}
+
+/**
+ * ice_sched_replay_node_bw - replay node(s) BW
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @bw_t_info: BW type information
+ *
+ * This function restores node's BW from bw_t_info. The caller needs
+ * to hold the scheduler lock.
+ */
+static enum ice_status
+ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
+ struct ice_bw_type_info *bw_t_info)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status status = ICE_ERR_PARAM;
+ u16 bw_alloc;
+
+ if (!node)
+ return status;
+ if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
+ return 0;
+ if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) {
+ status = ice_sched_replay_node_prio(hw, node,
+ bw_t_info->generic);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) {
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
+ bw_t_info->cir_bw.bw);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) {
+ bw_alloc = bw_t_info->cir_bw.bw_alloc;
+ status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
+ bw_alloc);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) {
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
+ bw_t_info->eir_bw.bw);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) {
+ bw_alloc = bw_t_info->eir_bw.bw_alloc;
+ status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
+ bw_alloc);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap))
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
+ bw_t_info->shared_bw);
+ return status;
+}
+
+/**
+ * ice_sched_replay_q_bw - replay queue type node BW
+ * @pi: port information structure
+ * @q_ctx: queue context structure
+ *
+ * This function replays queue type node bandwidth. This function needs to be
+ * called with scheduler lock held.
+ */
+enum ice_status
+ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
+{
+ struct ice_sched_node *q_node;
+
+ /* The following also checks for the presence of the node in the tree */
+ q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
+ if (!q_node)
+ return ICE_ERR_PARAM;
+ return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 3902a8ad3025..f0593cfb6521 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -8,6 +8,36 @@
#define ICE_QGRP_LAYER_OFFSET 2
#define ICE_VSI_LAYER_OFFSET 4
+#define ICE_SCHED_INVAL_LAYER_NUM 0xFF
+/* Burst size is a 12-bit register that is configured while creating the RL
+ * profile(s). The MSB is a granularity bit that selects the granularity type:
+ * 0 - LSB bits are in 64 byte granularity
+ * 1 - LSB bits are in 1K byte granularity
+ */
+#define ICE_64_BYTE_GRANULARITY 0
+#define ICE_KBYTE_GRANULARITY BIT(11)
+#define ICE_MIN_BURST_SIZE_ALLOWED 64 /* In Bytes */
+#define ICE_MAX_BURST_SIZE_ALLOWED \
+ ((BIT(11) - 1) * 1024) /* In Bytes */
+#define ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY \
+ ((BIT(11) - 1) * 64) /* In Bytes */
+#define ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY ICE_MAX_BURST_SIZE_ALLOWED
+
+#define ICE_RL_PROF_FREQUENCY 446000000
+#define ICE_RL_PROF_ACCURACY_BYTES 128
+#define ICE_RL_PROF_MULTIPLIER 10000
+#define ICE_RL_PROF_TS_MULTIPLIER 32
+#define ICE_RL_PROF_FRACTION 512
+
+/* BW rate limit profile parameters list entry along
+ * with bandwidth maintained per layer in port info
+ */
+struct ice_aqc_rl_profile_info {
+ struct ice_aqc_rl_profile_elem profile;
+ struct list_head list_entry;
+ u32 bw; /* requested */
+ u16 prof_id_ref; /* profile ID to node association ref count */
+};
struct ice_sched_agg_vsi_info {
struct list_head list_entry;
@@ -48,4 +78,13 @@ enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+enum ice_status
+ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type, u32 bw);
+enum ice_status
+ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type);
+enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
+enum ice_status
+ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 1acdd43a2edd..77d211ea3aae 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -416,8 +416,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
} else {
/* update with new HW VSI num */
- if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
- tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
+ tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
}
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index cb123fbe30be..fa14b9545dab 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -14,11 +14,6 @@
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
-/* VSI queue context structure */
-struct ice_q_ctx {
- u16 q_handle;
-};
-
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
u16 vsi_num;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 33dd103035dc..2c212f64d99f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -5,8 +5,13 @@
#include <linux/prefetch.h>
#include <linux/mm.h>
+#include <linux/bpf_trace.h>
+#include <net/xdp.h>
+#include "ice_txrx_lib.h"
+#include "ice_lib.h"
#include "ice.h"
#include "ice_dcb_lib.h"
+#include "ice_xsk.h"
#define ICE_RX_HDR_SIZE 256
@@ -19,7 +24,10 @@ static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
if (tx_buf->skb) {
- dev_kfree_skb_any(tx_buf->skb);
+ if (ice_ring_is_xdp(ring))
+ page_frag_free(tx_buf->raw_buf);
+ else
+ dev_kfree_skb_any(tx_buf->skb);
if (dma_unmap_len(tx_buf, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buf, dma),
@@ -51,6 +59,11 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
u16 i;
+ if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ ice_xsk_clean_xdp_ring(tx_ring);
+ goto tx_skip_free;
+ }
+
/* ring already cleared, nothing to do */
if (!tx_ring->tx_buf)
return;
@@ -59,6 +72,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
for (i = 0; i < tx_ring->count; i++)
ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
+tx_skip_free:
memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
/* Zero out the descriptor ring */
@@ -136,8 +150,11 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
total_bytes += tx_buf->bytecount;
total_pkts += tx_buf->gso_segs;
- /* free the skb */
- napi_consume_skb(tx_buf->skb, napi_budget);
+ if (ice_ring_is_xdp(tx_ring))
+ page_frag_free(tx_buf->raw_buf);
+ else
+ /* free the skb */
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -188,12 +205,11 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
i += tx_ring->count;
tx_ring->next_to_clean = i;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.bytes += total_bytes;
- tx_ring->stats.pkts += total_pkts;
- u64_stats_update_end(&tx_ring->syncp);
- tx_ring->q_vector->tx.total_bytes += total_bytes;
- tx_ring->q_vector->tx.total_pkts += total_pkts;
+
+ ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
+
+ if (ice_ring_is_xdp(tx_ring))
+ return !!budget;
netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
total_bytes);
@@ -273,6 +289,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
+ if (rx_ring->xsk_umem) {
+ ice_xsk_clean_rx_ring(rx_ring);
+ goto rx_skip_free;
+ }
+
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
@@ -289,10 +310,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
*/
dma_sync_single_range_for_cpu(dev, rx_buf->dma,
rx_buf->page_offset,
- ICE_RXBUF_2048, DMA_FROM_DEVICE);
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
/* free resources associated with mapping */
- dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
+ dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
@@ -300,6 +322,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
rx_buf->page_offset = 0;
}
+rx_skip_free:
memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
/* Zero out the descriptor ring */
@@ -319,6 +342,10 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
ice_clean_rx_ring(rx_ring);
+ if (rx_ring->vsi->type == ICE_VSI_PF)
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ rx_ring->xdp_prog = NULL;
devm_kfree(rx_ring->dev, rx_ring->rx_buf);
rx_ring->rx_buf = NULL;
@@ -363,6 +390,15 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
+
+ if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+ WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
+
+ if (rx_ring->vsi->type == ICE_VSI_PF &&
+ !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->q_index))
+ goto err;
return 0;
err:
@@ -372,34 +408,110 @@ err:
}
/**
- * ice_release_rx_desc - Store the new tail and head values
- * @rx_ring: ring to bump
- * @val: new head index
+ * ice_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset into the data buffer for this ring.
*/
-static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
- u16 prev_ntu = rx_ring->next_to_use;
+ if (ice_ring_uses_build_skb(rx_ring))
+ return ICE_SKB_PAD;
+ else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+ return XDP_PACKET_HEADROOM;
- rx_ring->next_to_use = val;
+ return 0;
+}
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = val;
+/**
+ * ice_run_xdp - Executes an XDP program on initialized xdp_buff
+ * @rx_ring: Rx ring
+ * @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
+ *
+ * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
+ */
+static int
+ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
+{
+ int err, result = ICE_XDP_PASS;
+ struct ice_ring *xdp_ring;
+ u32 act;
- /* QRX_TAIL will be updated with any tail value, but hardware ignores
- * the lower 3 bits. This makes it so we only bump tail on meaningful
- * boundaries. Also, this allows us to bump tail on intervals of 8 up to
- * the budget depending on the current traffic load.
- */
- val &= ~0x7;
- if (prev_ntu != val) {
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(val, rx_ring->tail);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
+ result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fallthrough -- not supported action */
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping frame */
+ case XDP_DROP:
+ result = ICE_XDP_CONSUMED;
+ break;
}
+
+ return result;
+}
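[Editor's note] The verdict dispatch above implements the standard XDP action contract. A minimal sketch of a BPF program whose return codes would exercise these paths (illustrative only; the program and its names are not part of this patch):

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_reflect(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* drop runt frames; ice_run_xdp() maps XDP_DROP to ICE_XDP_CONSUMED */
	if (data + ETH_HLEN > data_end)
		return XDP_DROP;

	/* bounce everything else out the same port via ice_xmit_xdp_buff() */
	return XDP_TX;
}

char _license[] SEC("license") = "GPL";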
+
+/**
+ * ice_xdp_xmit - submit packets to XDP ring for transmission
+ * @dev: netdev
+ * @n: number of XDP frames to be transmitted
+ * @frames: XDP frames to be transmitted
+ * @flags: transmit flags
+ *
+ * Returns the number of frames successfully sent. Frames that fail are
+ * freed via the XDP return API.
+ * For error cases, a negative errno code is returned and no frames
+ * are transmitted (the caller must handle freeing the frames).
+ */
+int
+ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ unsigned int queue_index = smp_processor_id();
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_ring *xdp_ring;
+ int drops = 0, i;
+
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return -ENETDOWN;
+
+ if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
+ return -ENXIO;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ xdp_ring = vsi->xdp_rings[queue_index];
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+ if (err != ICE_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ ice_xdp_ring_update_tail(xdp_ring);
+
+ return n - drops;
}
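[Editor's note] A hedged sketch of how a caller consumes the .ndo_xdp_xmit contract implemented above: a negative errno means nothing was sent and the caller still owns every frame; otherwise the driver has already freed the (n - sent) dropped frames via the XDP return API. The helper name and locals are hypothetical:

static void example_xdp_flush(struct net_device *dev,
			      struct xdp_frame **frames, int n)
{
	int sent, i;

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);
	if (sent < 0) {
		/* errno path: the caller must free all frames itself */
		for (i = 0; i < n; i++)
			xdp_return_frame(frames[i]);
		return;
	}
	/* sent == n - drops; dropped frames were freed by the driver */
}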
/**
@@ -423,28 +535,28 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
}
/* alloc new page for storage */
- page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
/* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, 0);
+ __free_pages(page, ice_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = ice_rx_offset(rx_ring);
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
@@ -486,7 +598,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset,
- ICE_RXBUF_2048,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
/* Refresh the desc even if buffer_addrs didn't change
@@ -557,9 +669,6 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
*/
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
-#if (PAGE_SIZE >= 8192)
- unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
-#endif
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
@@ -572,7 +681,9 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
#else
- if (rx_buf->page_offset > last_offset)
+#define ICE_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
+ if (rx_buf->page_offset > ICE_LAST_OFFSET)
return false;
#endif /* PAGE_SIZE < 8192) */
@@ -590,6 +701,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
/**
* ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
+ * @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: buffer containing page to add
* @skb: sk_buff to place the data into
* @size: packet length from rx_desc
@@ -599,13 +711,13 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
* The function will then update the page offset.
*/
static void
-ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
- unsigned int size)
+ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size);
+ unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
#else
- unsigned int truesize = ICE_RXBUF_2048;
+ unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
if (!size)
@@ -679,10 +791,64 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
}
/**
+ * ice_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
+ * @xdp: xdp_buff pointing to the data
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+static struct sk_buff *
+ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
+#endif
+ struct sk_buff *skb;
+
+ /* Prefetch first cache line of first page. If xdp->data_meta
+ * is unused, this points exactly at xdp->data; otherwise we
+ * likely have a consumer accessing the first few bytes of
+ * metadata, and then the actual data.
+ */
+ prefetch(xdp->data_meta);
+#if L1_CACHE_BYTES < 128
+ prefetch((void *)(xdp->data + L1_CACHE_BYTES));
+#endif
+ /* build an skb around the page buffer */
+ skb = build_skb(xdp->data_hard_start, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* we must record the Rx queue, otherwise OS features such as
+ * symmetric queues won't work
+ */
+ skb_record_rx_queue(skb, rx_ring->q_index);
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ __skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ /* buffer is used by skb, update page_offset */
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+
+ return skb;
+}
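[Editor's worked example] The truesize math above, assuming PAGE_SIZE == 4096 and rx_buf_len == 2048:

/* truesize = ice_rx_pg_size(rx_ring) / 2 = 4096 / 2 = 2048, i.e. each page
 * is split into two half-page buffers; ice_rx_buf_adjust_pg_offset() XORs
 * page_offset with truesize, so the buffer flips between the two halves on
 * every reuse.
 */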
+
+/**
* ice_construct_skb - Allocate skb and populate it
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
- * @size: the length of the packet
+ * @xdp: xdp_buff pointing to the data
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
@@ -690,16 +856,16 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
*/
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
- unsigned int size)
+ struct xdp_buff *xdp)
{
- void *va = page_address(rx_buf->page) + rx_buf->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
+ prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
- prefetch((u8 *)va + L1_CACHE_BYTES);
+ prefetch((void *)(xdp->data + L1_CACHE_BYTES));
#endif /* L1_CACHE_BYTES */
/* allocate a skb to store the frags */
@@ -712,10 +878,11 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
/* Determine available headroom for copy */
headlen = size;
if (headlen > ICE_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);
+ headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+ memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
+ sizeof(long)));
/* if we exhaust the linear part then add what is left as a frag */
size -= headlen;
@@ -723,7 +890,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
#if (PAGE_SIZE >= 8192)
unsigned int truesize = SKB_DATA_ALIGN(size);
#else
- unsigned int truesize = ICE_RXBUF_2048;
+ unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
skb_add_rx_frag(skb, 0, rx_buf->page,
rx_buf->page_offset + headlen, size, truesize);
@@ -745,11 +912,18 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
*
- * This function will clean up the contents of the rx_buf. It will
- * either recycle the buffer or unmap it and free the associated resources.
+ * This function will update next_to_clean and then clean up the contents
+ * of the rx_buf. It will either recycle the buffer or unmap it and free
+ * the associated resources.
*/
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
if (!rx_buf)
return;
@@ -759,8 +933,9 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
- DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
+ ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+ ICE_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
}
@@ -770,227 +945,31 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
}
/**
- * ice_cleanup_headers - Correct empty headers
- * @skb: pointer to current skb being fixed
- *
- * Also address the case where we are pulling data in on pages only
- * and as such no data is present in the skb header.
- *
- * In addition if skb is not at least 60 bytes we need to pad it so that
- * it is large enough to qualify as a valid Ethernet frame.
- *
- * Returns true if an error was encountered and skb was freed.
- */
-static bool ice_cleanup_headers(struct sk_buff *skb)
-{
- /* if eth_skb_pad returns an error the skb was freed */
- if (eth_skb_pad(skb))
- return true;
-
- return false;
-}
-
-/**
- * ice_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
- * @stat_err_bits: value to mask
- *
- * This function does some fast chicanery in order to return the
- * value of the mask which is really only used for boolean tests.
- * The status_error_len doesn't need to be shifted because it begins
- * at offset zero.
- */
-static bool
-ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
-{
- return !!(rx_desc->wb.status_error0 &
- cpu_to_le16(stat_err_bits));
-}
-
-/**
* ice_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
* @skb: Current socket buffer containing buffer in progress
*
- * This function updates next to clean. If the buffer is an EOP buffer
- * this function exits returning false, otherwise it will place the
- * sk_buff in the next buffer to be chained and return true indicating
- * that this is in fact a non-EOP buffer.
+ * If the buffer is an EOP buffer, this function exits returning false;
+ * otherwise it returns true, indicating that this is in fact a non-EOP
+ * buffer.
*/
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb)
{
- u32 ntc = rx_ring->next_to_clean + 1;
-
- /* fetch, update, and store next to clean */
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
-
- prefetch(ICE_RX_DESC(rx_ring, ntc));
-
/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
return false;
/* place skb in next buffer to be received */
- rx_ring->rx_buf[ntc].skb = skb;
+ rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
return true;
}
/**
- * ice_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns a hash type to be used by skb_set_hash
- */
-static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
-{
- return PKT_HASH_TYPE_NONE;
-}
-
-/**
- * ice_rx_hash - set the hash value in the skb
- * @rx_ring: descriptor ring
- * @rx_desc: specific descriptor
- * @skb: pointer to current skb
- * @rx_ptype: the ptype value from the descriptor
- */
-static void
-ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 rx_ptype)
-{
- struct ice_32b_rx_flex_desc_nic *nic_mdid;
- u32 hash;
-
- if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
- return;
-
- if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
- return;
-
- nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
- hash = le32_to_cpu(nic_mdid->rss_hash);
- skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
-}
-
-/**
- * ice_rx_csum - Indicate in skb if checksum is good
- * @ring: the ring we care about
- * @skb: skb currently being received and modified
- * @rx_desc: the receive descriptor
- * @ptype: the packet type decoded by hardware
- *
- * skb->protocol must be set before this function is called
- */
-static void
-ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
- union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
-{
- struct ice_rx_ptype_decoded decoded;
- u32 rx_error, rx_status;
- bool ipv4, ipv6;
-
- rx_status = le16_to_cpu(rx_desc->wb.status_error0);
- rx_error = rx_status;
-
- decoded = ice_decode_rx_desc_ptype(ptype);
-
- /* Start with CHECKSUM_NONE and by default csum_level = 0 */
- skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
-
- /* check if Rx checksum is enabled */
- if (!(ring->netdev->features & NETIF_F_RXCSUM))
- return;
-
- /* check if HW has decoded the packet and checksum */
- if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
- return;
-
- if (!(decoded.known && decoded.outer_ip))
- return;
-
- ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
-
- if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
- BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
- goto checksum_fail;
- else if (ipv6 && (rx_status &
- (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
- goto checksum_fail;
-
- /* check for L4 errors and handle packets that were not able to be
- * checksummed due to arrival speed
- */
- if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
- goto checksum_fail;
-
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case ICE_RX_PTYPE_INNER_PROT_TCP:
- case ICE_RX_PTYPE_INNER_PROT_UDP:
- case ICE_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- default:
- break;
- }
- return;
-
-checksum_fail:
- ring->vsi->back->hw_csum_rx_error++;
-}
-
-/**
- * ice_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: Rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being populated
- * @ptype: the packet type decoded by hardware
- *
- * This function checks the ring, descriptor, and packet information in
- * order to populate the hash, checksum, VLAN, protocol, and
- * other fields within the skb.
- */
-static void
-ice_process_skb_fields(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 ptype)
-{
- ice_rx_hash(rx_ring, rx_desc, skb, ptype);
-
- /* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
- ice_rx_csum(rx_ring, skb, rx_desc, ptype);
-}
-
-/**
- * ice_receive_skb - Send a completed packet up the stack
- * @rx_ring: Rx ring in play
- * @skb: packet to send up
- * @vlan_tag: VLAN tag for packet
- *
- * This function sends the completed packet (via. skb) up the stack using
- * gro receive functions (with/without VLAN tag)
- */
-static void
-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
-{
- if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- napi_gro_receive(&rx_ring->q_vector->napi, skb);
-}
-
-/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1006,8 +985,13 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int xdp_res, xdp_xmit = 0;
+ struct bpf_prog *xdp_prog = NULL;
+ struct xdp_buff xdp;
bool failure;
+ xdp.rxq = &rx_ring->xdp_rxq;
+
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
@@ -1042,10 +1026,57 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
/* retrieve a buffer from the ring */
rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
+ if (!size) {
+ xdp.data = NULL;
+ xdp.data_end = NULL;
+ xdp.data_hard_start = NULL;
+ xdp.data_meta = NULL;
+ goto construct_skb;
+ }
+
+ xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
+ xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
+ xdp.data_meta = xdp.data;
+ xdp.data_end = xdp.data + size;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!xdp_prog) {
+ rcu_read_unlock();
+ goto construct_skb;
+ }
+
+ xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
+ rcu_read_unlock();
+ if (!xdp_res)
+ goto construct_skb;
+ if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ice_rx_pg_size(rx_ring) / 2;
+#else
+ truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) +
+ size);
+#endif
+ xdp_xmit |= xdp_res;
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ } else {
+ rx_buf->pagecnt_bias++;
+ }
+ total_rx_bytes += size;
+ total_rx_pkts++;
+
+ cleaned_count++;
+ ice_put_rx_buf(rx_ring, rx_buf);
+ continue;
+construct_skb:
if (skb)
- ice_add_rx_frag(rx_buf, skb, size);
+ ice_add_rx_frag(rx_ring, rx_buf, skb, size);
+ else if (ice_ring_uses_build_skb(rx_ring))
+ skb = ice_build_skb(rx_ring, rx_buf, &xdp);
else
- skb = ice_construct_skb(rx_ring, rx_buf, size);
+ skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1072,10 +1103,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
if (ice_test_staterr(rx_desc, stat_err_bits))
vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
- /* correct empty headers and pad skb if needed (to make valid
- * ethernet frame
- */
- if (ice_cleanup_headers(skb)) {
+ /* pad the skb if needed, to make a valid ethernet frame */
+ if (eth_skb_pad(skb)) {
skb = NULL;
continue;
}
@@ -1099,13 +1128,10 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
/* return up to cleaned_count buffers to hardware */
failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
- /* update queue and vector specific stats */
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.pkts += total_rx_pkts;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
- rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ if (xdp_prog)
+ ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+
+ ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
/* guarantee a trip back through this routine if there was a failure */
return failure ? budget : (int)total_rx_pkts;
@@ -1483,9 +1509,14 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- ice_for_each_ring(ring, q_vector->tx)
- if (!ice_clean_tx_irq(ring, budget))
+ ice_for_each_ring(ring, q_vector->tx) {
+ bool wd = ring->xsk_umem ?
+ ice_clean_tx_irq_zc(ring, budget) :
+ ice_clean_tx_irq(ring, budget);
+
+ if (!wd)
clean_complete = false;
+ }
/* Handle case where we are called by netpoll with a budget of 0 */
if (unlikely(budget <= 0))
@@ -1505,7 +1536,13 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
ice_for_each_ring(ring, q_vector->rx) {
int cleaned;
- cleaned = ice_clean_rx_irq(ring, budget_per_ring);
+ /* A dedicated path for zero-copy allows making a single
+ * comparison in the IRQ context instead of many inside the
+ * ice_clean_rx_irq function, and keeps the codebase cleaner.
+ */
+ cleaned = ring->xsk_umem ?
+ ice_clean_rx_irq_zc(ring, budget_per_ring) :
+ ice_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
if (cleaned >= budget_per_ring)
@@ -1527,17 +1564,6 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
return min_t(int, work_done, budget - 1);
}
-/* helper function for building cmd/type/offset */
-static __le64
-build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
-{
- return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
- (td_cmd << ICE_TXD_QW1_CMD_S) |
- (td_offset << ICE_TXD_QW1_OFFSET_S) |
- ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
- (td_tag << ICE_TXD_QW1_L2TAG1_S));
-}
-
/**
* __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
* @tx_ring: the ring to be checked
@@ -1689,9 +1715,9 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
i = 0;
/* write last descriptor with RS and EOP bits */
- td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag);
+ td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size,
+ td_tag);
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 94a9280193e2..a84cc0e6dd27 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -4,8 +4,12 @@
#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_
+#include "ice_type.h"
+
#define ICE_DFLT_IRQ_WORK 256
+#define ICE_RXBUF_3072 3072
#define ICE_RXBUF_2048 2048
+#define ICE_RXBUF_1536 1536
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
@@ -22,6 +26,71 @@
#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
+/* Attempt to maximize the headroom available for incoming frames. We use a 2K
+ * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
+ * This leaves us with 512 bytes of room. From that we need to deduct the
+ * space needed for the shared info and the padding needed to IP align the
+ * frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the legacy
+ * receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define ICE_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + ICE_RXBUF_1536) > SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
+
+/**
+ * ice_compute_pad - compute the padding
+ * @rx_buf_len: buffer length
+ *
+ * Round the buffer length up to the next half-page boundary, then
+ * subtract the skb_shared_info overhead and the buffer length itself;
+ * what remains is the space left for padding.
+ */
+static inline int ice_compute_pad(int rx_buf_len)
+{
+ int half_page_size;
+
+ half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
+}
+
+/**
+ * ice_skb_pad - determine the padding that we can supply
+ *
+ * Figure out the right Rx buffer size and, based on that, calculate the
+ * padding.
+ */
+static inline int ice_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (ICE_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = ICE_RXBUF_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return ice_compute_pad(rx_buf_len);
+}
+
+#define ICE_SKB_PAD ice_skb_pad()
+#else
+#define ICE_2K_TOO_SMALL_WITH_PADDING false
+#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
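[Editor's worked example] The padding math above, assuming 64-byte cache lines, NET_IP_ALIGN == 2 and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320 (all of these are config dependent):

/*   SKB_WITH_OVERHEAD(2048)      = 2048 - 320 = 1728
 *   NET_SKB_PAD + ICE_RXBUF_1536 = 64 + 1536  = 1600, which is <= 1728,
 * so ICE_2K_TOO_SMALL_WITH_PADDING is false and:
 *   rx_buf_len  = ICE_RXBUF_1536 - NET_IP_ALIGN = 1534
 *   ICE_SKB_PAD = SKB_WITH_OVERHEAD(ALIGN(1534, 2048)) - 1534
 *               = 1728 - 1534 = 194 bytes of headroom
 */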
+
/* We are assuming that the cache line is always 64 Bytes here for ice.
* In order to make sure that is a correct assumption there is a check in probe
* to print a warning if the read from GLPCI_CNF2 tells us that the cache line
@@ -49,12 +118,24 @@
#define ICE_TX_FLAGS_VLAN_PR_S 29
#define ICE_TX_FLAGS_VLAN_S 16
+#define ICE_XDP_PASS 0
+#define ICE_XDP_CONSUMED BIT(0)
+#define ICE_XDP_TX BIT(1)
+#define ICE_XDP_REDIR BIT(2)
+
#define ICE_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+#define ICE_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
+#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
+
struct ice_tx_buf {
struct ice_tx_desc *next_to_watch;
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ void *raw_buf; /* used for XDP */
+ };
unsigned int bytecount;
unsigned short gso_segs;
u32 tx_flags;
@@ -76,9 +157,17 @@ struct ice_tx_offload_params {
struct ice_rx_buf {
struct sk_buff *skb;
dma_addr_t dma;
- struct page *page;
- unsigned int page_offset;
- u16 pagecnt_bias;
+ union {
+ struct {
+ struct page *page;
+ unsigned int page_offset;
+ u16 pagecnt_bias;
+ };
+ struct {
+ void *addr;
+ u64 handle;
+ };
+ };
};
struct ice_q_stats {
@@ -198,18 +287,44 @@ struct ice_ring {
};
struct rcu_head rcu; /* to avoid race on free */
+ struct bpf_prog *xdp_prog;
+ struct xdp_umem *xsk_umem;
+ struct zero_copy_allocator zca;
+ /* CL3 - 3rd cacheline starts here */
+ struct xdp_rxq_info xdp_rxq;
/* CLX - the below items are only accessed infrequently and should be
* in their own cache line if possible
*/
+#define ICE_TX_FLAGS_RING_XDP BIT(0)
+#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+ u8 flags;
dma_addr_t dma; /* physical address of ring */
unsigned int size; /* length of descriptor ring in bytes */
u32 txq_teid; /* Added Tx queue TEID */
u16 rx_buf_len;
-#ifdef CONFIG_DCB
u8 dcb_tc; /* Traffic class of ring */
-#endif /* CONFIG_DCB */
} ____cacheline_internodealigned_in_smp;
+static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
+{
+ return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
+}
+
+static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring)
+{
+ ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
+}
+
+static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring)
+{
+ ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
+}
+
+static inline bool ice_ring_is_xdp(struct ice_ring *ring)
+{
+ return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
+}
+
struct ice_ring_container {
/* head of linked-list of rings */
struct ice_ring *ring;
@@ -230,6 +345,19 @@ struct ice_ring_container {
#define ice_for_each_ring(pos, head) \
for (pos = (head).ring; pos; pos = pos->next)
+static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->rx_buf_len > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
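[Editor's worked example] The page-order helpers above, assuming PAGE_SIZE == 4096:

/*   rx_buf_len == 2048 -> ice_rx_pg_order() == 0, ice_rx_pg_size() == 4096
 *   rx_buf_len == 3072 -> ice_rx_pg_order() == 1, ice_rx_pg_size() == 8192
 * i.e. 3 KiB buffers force order-1 pages so that two buffers plus their
 * skb_shared_info overhead still fit in one compound page.
 */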
+
+union ice_32b_rx_flex_desc;
+
bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
new file mode 100644
index 000000000000..35bbc4ff603c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_txrx_lib.h"
+
+/**
+ * ice_release_rx_desc - Store the new tail and head values
+ * @rx_ring: ring to bump
+ * @val: new head index
+ */
+void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+{
+ u16 prev_ntu = rx_ring->next_to_use;
+
+ rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
+
+ /* QRX_TAIL will be updated with any tail value, but hardware ignores
+ * the lower 3 bits. This makes it so we only bump tail on meaningful
+ * boundaries. Also, this allows us to bump tail on intervals of 8 up to
+ * the budget depending on the current traffic load.
+ */
+ val &= ~0x7;
+ if (prev_ntu != val) {
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+ }
+}
+
+/**
+ * ice_ptype_to_htype - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ */
+static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
+{
+ return PKT_HASH_TYPE_NONE;
+}
+
+/**
+ * ice_rx_hash - set the hash value in the skb
+ * @rx_ring: descriptor ring
+ * @rx_desc: specific descriptor
+ * @skb: pointer to current skb
+ * @rx_ptype: the ptype value from the descriptor
+ */
+static void
+ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 rx_ptype)
+{
+ struct ice_32b_rx_flex_desc_nic *nic_mdid;
+ u32 hash;
+
+ if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
+ return;
+
+ nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ hash = le32_to_cpu(nic_mdid->rss_hash);
+ skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
+}
+
+/**
+ * ice_rx_csum - Indicate in skb if checksum is good
+ * @ring: the ring we care about
+ * @skb: skb currently being received and modified
+ * @rx_desc: the receive descriptor
+ * @ptype: the packet type decoded by hardware
+ *
+ * skb->protocol must be set before this function is called
+ */
+static void
+ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
+ union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
+{
+ struct ice_rx_ptype_decoded decoded;
+ u32 rx_error, rx_status;
+ bool ipv4, ipv6;
+
+ rx_status = le16_to_cpu(rx_desc->wb.status_error0);
+ rx_error = rx_status;
+
+ decoded = ice_decode_rx_desc_ptype(ptype);
+
+ /* Start with CHECKSUM_NONE and by default csum_level = 0 */
+ skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+
+ /* check if Rx checksum is enabled */
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* check if HW has decoded the packet and checksum */
+ if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
+ return;
+
+ if (!(decoded.known && decoded.outer_ip))
+ return;
+
+ ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
+ ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
+
+ if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+ goto checksum_fail;
+ else if (ipv6 && (rx_status &
+ (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
+ goto checksum_fail;
+
+ /* check for L4 errors and handle packets that were not able to be
+ * checksummed due to arrival speed
+ */
+ if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
+ goto checksum_fail;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case ICE_RX_PTYPE_INNER_PROT_TCP:
+ case ICE_RX_PTYPE_INNER_PROT_UDP:
+ case ICE_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ default:
+ break;
+ }
+ return;
+
+checksum_fail:
+ ring->vsi->back->hw_csum_rx_error++;
+}
+
+/**
+ * ice_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: Rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @ptype: the packet type decoded by hardware
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
+ */
+void
+ice_process_skb_fields(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 ptype)
+{
+ ice_rx_hash(rx_ring, rx_desc, skb, ptype);
+
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ ice_rx_csum(rx_ring, skb, rx_desc, ptype);
+}
+
+/**
+ * ice_receive_skb - Send a completed packet up the stack
+ * @rx_ring: Rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: VLAN tag for packet
+ *
+ * This function sends the completed packet (via skb) up the stack using
+ * GRO receive functions (with/without VLAN tag)
+ */
+void
+ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
+{
+ if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (vlan_tag & VLAN_VID_MASK))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
+}
+
+/**
+ * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
+ * @data: packet data pointer
+ * @size: packet data size
+ * @xdp_ring: XDP ring for transmission
+ */
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
+{
+ u16 i = xdp_ring->next_to_use;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ dma_addr_t dma;
+
+ if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
+ xdp_ring->tx_stats.tx_busy++;
+ return ICE_XDP_CONSUMED;
+ }
+
+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(xdp_ring->dev, dma))
+ return ICE_XDP_CONSUMED;
+
+ tx_buf = &xdp_ring->tx_buf[i];
+ tx_buf->bytecount = size;
+ tx_buf->gso_segs = 1;
+ tx_buf->raw_buf = data;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ tx_desc = ICE_TX_DESC(xdp_ring, i);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
+ size, 0);
+
+ /* Make certain all of the status bits have been updated
+ * before next_to_watch is written.
+ */
+ smp_wmb();
+
+ i++;
+ if (i == xdp_ring->count)
+ i = 0;
+
+ tx_buf->next_to_watch = tx_desc;
+ xdp_ring->next_to_use = i;
+
+ return ICE_XDP_TX;
+}
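[Editor's note] The tail register is deliberately not written here; callers batch several descriptors and ring the doorbell once. Roughly (bufs, lens and n are hypothetical locals):

	for (i = 0; i < n; i++)
		if (ice_xmit_xdp_ring(bufs[i], lens[i], xdp_ring) !=
		    ICE_XDP_TX)
			break;
	ice_xdp_ring_update_tail(xdp_ring);	/* single doorbell write */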
+
+/**
+ * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
+ * @xdp: XDP buffer
+ * @xdp_ring: XDP Tx ring
+ *
+ * Returns negative on failure, 0 on success.
+ */
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
+{
+ struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+
+ if (unlikely(!xdpf))
+ return ICE_XDP_CONSUMED;
+
+ return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+}
+
+/**
+ * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
+ * @rx_ring: Rx ring
+ * @xdp_res: Result of the receive batch
+ *
+ * This function bumps the XDP Tx tail and/or flushes the redirect map,
+ * and should be called when a batch of packets has been processed in
+ * the NAPI loop.
+ */
+void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
+{
+ if (xdp_res & ICE_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_res & ICE_XDP_TX) {
+ struct ice_ring *xdp_ring =
+ rx_ring->vsi->xdp_rings[rx_ring->q_index];
+
+ ice_xdp_ring_update_tail(xdp_ring);
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
new file mode 100644
index 000000000000..ba9164dad9ae
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_TXRX_LIB_H_
+#define _ICE_TXRX_LIB_H_
+#include "ice.h"
+
+/**
+ * ice_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool
+ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
+{
+ return !!(rx_desc->wb.status_error0 & cpu_to_le16(stat_err_bits));
+}
+
+static inline __le64
+build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
+{
+ return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
+ (td_cmd << ICE_TXD_QW1_CMD_S) |
+ (td_offset << ICE_TXD_QW1_OFFSET_S) |
+ ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
+ (td_tag << ICE_TXD_QW1_L2TAG1_S));
+}
+
+/**
+ * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
+ * @xdp_ring: XDP Tx ring
+ *
+ * This function updates the XDP Tx ring tail register.
+ */
+static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
+{
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+ writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
+}
+
+void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res);
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);
+void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val);
+void
+ice_process_skb_fields(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 ptype);
+void
+ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
+#endif /* !_ICE_TXRX_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 6667d17a4206..eba8b04b8cbd 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -19,6 +19,17 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
return test_bit(tc, &bitmap);
}
+static inline u64 round_up_64bit(u64 a, u32 b)
+{
+ return div64_long(((a) + (b) / 2), (b));
+}
+
+static inline u32 ice_round_to_num(u32 N, u32 R)
+{
+ return ((((N) % (R)) < ((R) / 2)) ? (((N) / (R)) * (R)) :
+ ((((N) + (R) - 1) / (R)) * (R)));
+}
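[Editor's worked examples] Note that despite its name, round_up_64bit() divides rounding to the nearest integer (ties round up), while ice_round_to_num() rounds N to the nearest multiple of R:

/*   round_up_64bit(9, 4)        == (9 + 2) / 4  == 2  (2.25 rounds down)
 *   round_up_64bit(10, 4)       == (10 + 2) / 4 == 3  (2.5 rounds up)
 *   ice_round_to_num(1449, 100) == 1400   (remainder 49 < 50, round down)
 *   ice_round_to_num(1450, 100) == 1500   (remainder 50, round up)
 */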
+
/* Driver always calls main vsi_handle first */
#define ICE_MAIN_VSI_HANDLE 0
@@ -35,6 +46,8 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
#define ICE_DBG_PKG BIT_ULL(16)
#define ICE_DBG_RES BIT_ULL(17)
#define ICE_DBG_AQ_MSG BIT_ULL(24)
+#define ICE_DBG_AQ_DESC BIT_ULL(25)
+#define ICE_DBG_AQ_DESC_BUF BIT_ULL(26)
#define ICE_DBG_AQ_CMD BIT_ULL(27)
#define ICE_DBG_USER BIT_ULL(31)
@@ -272,10 +285,56 @@ enum ice_agg_type {
ICE_AGG_TYPE_QG
};
+/* Rate limit types */
+enum ice_rl_type {
+ ICE_UNKNOWN_BW = 0,
+ ICE_MIN_BW, /* for CIR profile */
+ ICE_MAX_BW, /* for EIR profile */
+ ICE_SHARED_BW /* for shared profile */
+};
+
+#define ICE_SCHED_MIN_BW 500 /* in Kbps */
+#define ICE_SCHED_MAX_BW 100000000 /* in Kbps */
+#define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */
#define ICE_SCHED_DFLT_RL_PROF_ID 0
+#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BW_WT 1
+#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
+#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
-/* VSI type list entry to locate corresponding VSI/ag nodes */
+ /* Data structure for saving BW information */
+enum ice_bw_type {
+ ICE_BW_TYPE_PRIO,
+ ICE_BW_TYPE_CIR,
+ ICE_BW_TYPE_CIR_WT,
+ ICE_BW_TYPE_EIR,
+ ICE_BW_TYPE_EIR_WT,
+ ICE_BW_TYPE_SHARED,
+ ICE_BW_TYPE_CNT /* This must be last */
+};
+
+struct ice_bw {
+ u32 bw;
+ u16 bw_alloc;
+};
+
+struct ice_bw_type_info {
+ DECLARE_BITMAP(bw_t_bitmap, ICE_BW_TYPE_CNT);
+ u8 generic;
+ struct ice_bw cir_bw;
+ struct ice_bw eir_bw;
+ u32 shared_bw;
+};
+
+/* VSI queue context structure for given TC */
+struct ice_q_ctx {
+ u16 q_handle;
+ u32 q_teid;
+ /* bw_t_info saves queue BW information */
+ struct ice_bw_type_info bw_t_info;
+};
+
+/* VSI type list entry to locate corresponding VSI/aggregator nodes */
struct ice_sched_vsi_info {
struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
@@ -364,6 +423,8 @@ struct ice_port_info {
struct mutex sched_lock; /* protect access to TXSched tree */
struct ice_sched_node *
sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
+ /* List contains profile ID(s) and other params per layer */
+ struct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
/* DCBX info */
struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
@@ -415,6 +476,8 @@ struct ice_hw {
u8 pf_id; /* device profile info */
+ u16 max_burst_size; /* driver sets this value */
+
/* Tx Scheduler values */
u16 num_tx_sched_layers;
u16 num_tx_sched_phys_layers;
@@ -555,6 +618,8 @@ struct ice_hw_port_stats {
};
/* Checksum and Shadow RAM pointers */
+#define ICE_SR_BOOT_CFG_PTR 0x132
+#define ICE_NVM_OEM_VER_OFF 0x02
#define ICE_SR_NVM_DEV_STARTER_VER 0x18
#define ICE_SR_NVM_EETRACK_LO 0x2D
#define ICE_SR_NVM_EETRACK_HI 0x2E
@@ -568,6 +633,7 @@ struct ice_hw_port_stats {
#define ICE_OEM_VER_BUILD_MASK (0xffff << ICE_OEM_VER_BUILD_SHIFT)
#define ICE_OEM_VER_SHIFT 24
#define ICE_OEM_VER_MASK (0xff << ICE_OEM_VER_SHIFT)
+#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
#define ICE_SR_WORDS_IN_1KB 512
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index b45797f39b2f..2ac83ad3d1a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
/**
@@ -389,7 +390,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
* by the time we get here.
*/
if (!is_pfr)
- wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0);
+ wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@@ -1151,6 +1152,25 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
}
/**
+ * ice_is_vf_disabled - check if the PF or a given VF is disabled
+ * @vf: pointer to the VF info
+ *
+ * Returns true if the PF or VF is disabled, false otherwise.
+ */
+static bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ /* If the PF has been disabled, there is no need to reset the VF
+ * until the PF is active again. Similarly, if the VF has been
+ * disabled, something else is resetting the VF, so we shouldn't
+ * continue. Otherwise, the caller sets the disable-VF state bit
+ * for the actual reset and continues.
+ */
+ return (test_bit(__ICE_VF_DIS, pf->state) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states));
+}
+
+/**
* ice_reset_vf - Reset a particular VF
* @vf: pointer to the VF structure
* @is_vflr: true if VFLR was issued, false if not
@@ -1167,19 +1187,15 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
u32 reg;
int i;
- /* If the PF has been disabled, there is no need resetting VF until
- * PF is active again.
- */
- if (test_bit(__ICE_VF_DIS, pf->state))
- return false;
-
- /* If the VF has been disabled, this means something else is
- * resetting the VF, so we shouldn't continue. Otherwise, set
- * disable VF state bit for actual reset, and continue.
- */
- if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states))
- return false;
+ if (ice_is_vf_disabled(vf)) {
+ dev_dbg(&pf->pdev->dev,
+ "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
+ vf->vf_id);
+ return true;
+ }
+ /* Set VF disable bit state here, before triggering reset */
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
ice_trigger_vf_reset(vf, is_vflr, false);
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -1407,7 +1423,7 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
dev_err(dev, "This device is not capable of SR-IOV\n");
- return -ENODEV;
+ return -EOPNOTSUPP;
}
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
@@ -1495,12 +1511,10 @@ void ice_process_vflr_event(struct ice_pf *pf)
}
/**
- * ice_vc_dis_vf - Disable a given VF via SW reset
+ * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
* @vf: pointer to the VF info
- *
- * Disable the VF through a SW reset
*/
-static void ice_vc_dis_vf(struct ice_vf *vf)
+static void ice_vc_reset_vf(struct ice_vf *vf)
{
ice_vc_notify_vf_reset(vf);
ice_reset_vf(vf, false);
@@ -2159,9 +2173,11 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
vector_id = map->vector_id;
vsi_id = map->vsi_id;
- /* validate msg params */
- if (!(vector_id < pf->hw.func_caps.common_cap
- .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
+ /* vector_id is always 0-based for each VF, and can never be
+ * larger than or equal to the max allowed interrupts per VF
+ */
+ if (!(vector_id < ICE_MAX_INTR_PER_VF) ||
+ !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2540,7 +2556,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
- ice_vc_dis_vf(vf);
+ ice_vc_reset_vf(vf);
dev_info(&pf->pdev->dev,
"VF %d granted request of %u queues.\n",
vf->vf_id, req_queues);
@@ -3124,6 +3140,23 @@ out:
}
/**
+ * ice_wait_on_vf_reset - poll for VF readiness after reset
+ * @vf: the VF being reset
+ *
+ * Poll to make sure a given VF is ready after reset
+ */
+static void ice_wait_on_vf_reset(struct ice_vf *vf)
+{
+ int i;
+
+ for (i = 0; i < ICE_MAX_VF_RESET_WAIT; i++) {
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+ break;
+ msleep(20);
+ }
+}
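[Editor's note] On the bound above:

/* With ICE_MAX_VF_RESET_WAIT == 15 and msleep(20), this polls for at most
 * ~15 * 20 ms = 300 ms, which covers the ~250 ms worst case cited at the
 * ice_set_vf_mac() and ice_set_vf_trust() call sites below.
 */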
+
+/**
* ice_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -3146,6 +3179,15 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
vf = &pf->vf[vf_id];
+ /* Don't set MAC on disabled VF */
+ if (ice_is_vf_disabled(vf))
+ return -EINVAL;
+
+ /* If the VF is resetting, wait until the reset completes. Depending
+ * on factors such as the queue-disabling routine, this can take ~250 ms
+ */
+ ice_wait_on_vf_reset(vf);
+
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
return -EBUSY;
@@ -3167,7 +3209,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
"MAC on VF %d set to %pM. VF driver will be reinitialized\n",
vf_id, mac);
- ice_vc_dis_vf(vf);
+ ice_vc_reset_vf(vf);
return ret;
}
@@ -3193,6 +3235,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
}
vf = &pf->vf[vf_id];
+ /* Don't set Trusted Mode on disabled VF */
+ if (ice_is_vf_disabled(vf))
+ return -EINVAL;
+
+ /* If the VF is resetting, wait until the reset completes. Depending
+ * on factors such as the queue-disabling routine, this can take ~250 ms
+ */
+ ice_wait_on_vf_reset(vf);
+
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
return -EBUSY;
@@ -3203,7 +3254,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
return 0;
vf->trusted = trusted;
- ice_vc_dis_vf(vf);
+ ice_vc_reset_vf(vf);
dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
vf_id, trusted ? "" : "un");
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 0d9880c8bba3..2e867ad2e81d 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -38,6 +38,7 @@
#define ICE_MAX_POLICY_INTR_PER_VF 33
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
+#define ICE_MAX_VF_RESET_WAIT 15
/* Specific VF states */
enum ice_vf_states {
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
new file mode 100644
index 000000000000..fcffad0069d6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -0,0 +1,1181 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock.h>
+#include <net/xdp.h>
+#include "ice.h"
+#include "ice_base.h"
+#include "ice_type.h"
+#include "ice_xsk.h"
+#include "ice_txrx.h"
+#include "ice_txrx_lib.h"
+#include "ice_lib.h"
+
+/**
+ * ice_qp_reset_stats - Resets all stats for rings of given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+{
+ memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
+ sizeof(vsi->rx_rings[q_idx]->rx_stats));
+ memset(&vsi->tx_rings[q_idx]->stats, 0,
+ sizeof(vsi->tx_rings[q_idx]->stats));
+ if (ice_is_xdp_ena_vsi(vsi))
+ memset(&vsi->xdp_rings[q_idx]->stats, 0,
+ sizeof(vsi->xdp_rings[q_idx]->stats));
+}
+
+/**
+ * ice_qp_clean_rings - Cleans all the rings of a given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+{
+ ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+ ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
+ * @vsi: VSI that has netdev
+ * @q_vector: q_vector that has NAPI context
+ * @enable: true for enable, false for disable
+ */
+static void
+ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ bool enable)
+{
+ if (!vsi->netdev || !q_vector)
+ return;
+
+ if (enable)
+ napi_enable(&q_vector->napi);
+ else
+ napi_disable(&q_vector->napi);
+}
+
+/**
+ * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
+ * @vsi: the VSI that contains queue vector being un-configured
+ * @rx_ring: Rx ring that will have its IRQ disabled
+ * @q_vector: queue vector
+ */
+static void
+ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
+ struct ice_q_vector *q_vector)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int base = vsi->base_vector;
+ u16 reg;
+ u32 val;
+
+ /* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so only
+ * QINT_RQCTL is handled here
+ */
+ reg = rx_ring->reg_idx;
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+
+ if (q_vector) {
+ u16 v_idx = q_vector->v_idx;
+
+ wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
+ ice_flush(hw);
+ synchronize_irq(pf->msix_entries[v_idx + base].vector);
+ }
+}
+
+/**
+ * ice_qvec_cfg_msix - Enable IRQ for given queue vector
+ * @vsi: the VSI that contains queue vector
+ * @q_vector: queue vector
+ */
+static void
+ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+ u16 reg_idx = q_vector->reg_idx;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_ring *ring;
+
+ ice_cfg_itr(hw, q_vector);
+
+ wr32(hw, GLINT_RATE(reg_idx),
+ ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
+
+ ice_for_each_ring(ring, q_vector->tx)
+ ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
+ q_vector->tx.itr_idx);
+
+ ice_for_each_ring(ring, q_vector->rx)
+ ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
+ q_vector->rx.itr_idx);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_qvec_ena_irq - Enable IRQ for given queue vector
+ * @vsi: the VSI that contains queue vector
+ * @q_vector: queue vector
+ */
+static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+
+ ice_irq_dynamic_ena(hw, vsi, q_vector);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_qp_dis - Disables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_txq_meta txq_meta = { };
+ struct ice_ring *tx_ring, *rx_ring;
+ struct ice_q_vector *q_vector;
+ int timeout = 50;
+ int err;
+
+ if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+ return -EINVAL;
+
+ tx_ring = vsi->tx_rings[q_idx];
+ rx_ring = vsi->rx_rings[q_idx];
+ q_vector = rx_ring->q_vector;
+
+ while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+
+ ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ if (err)
+ return err;
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ memset(&txq_meta, 0, sizeof(txq_meta));
+ ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
+ &txq_meta);
+ if (err)
+ return err;
+ }
+ err = ice_vsi_ctrl_rx_ring(vsi, false, q_idx);
+ if (err)
+ return err;
+
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+ ice_qp_clean_rings(vsi, q_idx);
+ ice_qp_reset_stats(vsi, q_idx);
+
+ return 0;
+}
+
+/**
+ * ice_qp_ena - Enables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_aqc_add_tx_qgrp *qg_buf;
+ struct ice_ring *tx_ring, *rx_ring;
+ struct ice_q_vector *q_vector;
+ int err;
+
+ if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+ return -EINVAL;
+
+ qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
+ if (!qg_buf)
+ return -ENOMEM;
+
+ qg_buf->num_txqs = 1;
+
+ tx_ring = vsi->tx_rings[q_idx];
+ rx_ring = vsi->rx_rings[q_idx];
+ q_vector = rx_ring->q_vector;
+
+ err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
+ if (err)
+ goto free_buf;
+
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ memset(qg_buf, 0, sizeof(*qg_buf));
+ qg_buf->num_txqs = 1;
+ err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
+ if (err)
+ goto free_buf;
+ ice_set_ring_xdp(xdp_ring);
+ xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ }
+
+ err = ice_setup_rx_ctx(rx_ring);
+ if (err)
+ goto free_buf;
+
+ ice_qvec_cfg_msix(vsi, q_vector);
+
+ err = ice_vsi_ctrl_rx_ring(vsi, true, q_idx);
+ if (err)
+ goto free_buf;
+
+ clear_bit(__ICE_CFG_BUSY, vsi->state);
+ ice_qvec_toggle_napi(vsi, q_vector, true);
+ ice_qvec_ena_irq(vsi, q_vector);
+
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+free_buf:
+ kfree(qg_buf);
+ return err;
+}
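[Editor's sketch] The queue-pair helpers above bracket a UMEM (re)configuration; the real flow lives in ice_xsk_umem_setup(), whose opening appears below. Roughly, with error handling elided:

	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

	if (if_running)
		ret = ice_qp_dis(vsi, qid);		/* quiesce the pair */

	ret = umem ? ice_xsk_umem_enable(vsi, umem, qid)
		   : ice_xsk_umem_disable(vsi, qid);

	if (if_running)
		ret = ice_qp_ena(vsi, qid);		/* bring it back up */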
+
+/**
+ * ice_xsk_alloc_umems - allocate a UMEM region for an XDP socket
+ * @vsi: VSI to allocate the UMEM on
+ *
+ * Returns 0 on success, negative on error
+ */
+static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
+{
+ if (vsi->xsk_umems)
+ return 0;
+
+ vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
+ GFP_KERNEL);
+
+ if (!vsi->xsk_umems) {
+ vsi->num_xsk_umems = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_xsk_add_umem - add a UMEM region for XDP sockets
+ * @vsi: VSI to which the UMEM will be added
+ * @umem: pointer to a requested UMEM region
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on error
+ */
+static int ice_xsk_add_umem(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+{
+ int err;
+
+ err = ice_xsk_alloc_umems(vsi);
+ if (err)
+ return err;
+
+ vsi->xsk_umems[qid] = umem;
+ vsi->num_xsk_umems_used++;
+
+ return 0;
+}
+
+/**
+ * ice_xsk_remove_umem - Remove a UMEM for a certain ring/qid
+ * @vsi: VSI from which the UMEM will be removed
+ * @qid: Ring/qid associated with the UMEM
+ */
+static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
+{
+ vsi->xsk_umems[qid] = NULL;
+ vsi->num_xsk_umems_used--;
+
+ if (vsi->num_xsk_umems_used == 0) {
+ kfree(vsi->xsk_umems);
+ vsi->xsk_umems = NULL;
+ vsi->num_xsk_umems = 0;
+ }
+}
+
+/**
+ * ice_xsk_umem_dma_map - DMA map UMEM region for XDP sockets
+ * @vsi: VSI to map the UMEM region
+ * @umem: UMEM to map
+ *
+ * Returns 0 on success, negative on error
+ */
+static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ unsigned int i;
+
+ dev = &pf->pdev->dev;
+ for (i = 0; i < umem->npgs; i++) {
+ dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0,
+ PAGE_SIZE,
+ DMA_BIDIRECTIONAL,
+ ICE_RX_DMA_ATTR);
+ if (dma_mapping_error(dev, dma)) {
+ dev_dbg(dev,
+ "XSK UMEM DMA mapping error on page num %d", i);
+ goto out_unmap;
+ }
+
+ umem->pages[i].dma = dma;
+ }
+
+ return 0;
+
+out_unmap:
+ /* page i failed to map, so unwind the i pages mapped before it */
+ for (; i > 0; i--) {
+ dma_unmap_page_attrs(dev, umem->pages[i - 1].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
+ umem->pages[i - 1].dma = 0;
+ }
+
+ return -EFAULT;
+}
+
+/**
+ * ice_xsk_umem_dma_unmap - DMA unmap UMEM region for XDP sockets
+ * @vsi: VSI from which the UMEM will be unmapped
+ * @umem: UMEM to unmap
+ */
+static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ unsigned int i;
+
+ dev = &pf->pdev->dev;
+ for (i = 0; i < umem->npgs; i++) {
+ dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
+
+ umem->pages[i].dma = 0;
+ }
+}
+
+/**
+ * ice_xsk_umem_disable - disable a UMEM region
+ * @vsi: Current VSI
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
+{
+ if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
+ !vsi->xsk_umems[qid])
+ return -EINVAL;
+
+ ice_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
+ ice_xsk_remove_umem(vsi, qid);
+
+ return 0;
+}
+
+/**
+ * ice_xsk_umem_enable - enable a UMEM region
+ * @vsi: Current VSI
+ * @umem: pointer to a requested UMEM region
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+{
+ struct xdp_umem_fq_reuse *reuseq;
+ int err;
+
+ if (vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
+ if (qid >= vsi->num_xsk_umems)
+ return -EINVAL;
+
+ if (vsi->xsk_umems && vsi->xsk_umems[qid])
+ return -EBUSY;
+
+ reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
+ if (!reuseq)
+ return -ENOMEM;
+
+ xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
+
+ err = ice_xsk_umem_dma_map(vsi, umem);
+ if (err)
+ return err;
+
+ err = ice_xsk_add_umem(vsi, umem, qid);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_xsk_umem_setup - enable/disable a UMEM region depending on its state
+ * @vsi: Current VSI
+ * @umem: UMEM to enable/associate to a ring, NULL to disable
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+{
+ bool if_running, umem_present = !!umem;
+ int ret = 0, umem_failure = 0;
+
+ if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
+
+ if (if_running) {
+ ret = ice_qp_dis(vsi, qid);
+ if (ret) {
+ netdev_err(vsi->netdev, "ice_qp_dis error = %d", ret);
+ goto xsk_umem_if_up;
+ }
+ }
+
+ umem_failure = umem_present ? ice_xsk_umem_enable(vsi, umem, qid) :
+ ice_xsk_umem_disable(vsi, qid);
+
+xsk_umem_if_up:
+ if (if_running) {
+ ret = ice_qp_ena(vsi, qid);
+ if (!ret && umem_present)
+ napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
+ else if (ret)
+ netdev_err(vsi->netdev, "ice_qp_ena error = %d", ret);
+ }
+
+ if (umem_failure) {
+ netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d",
+ umem_present ? "en" : "dis", umem_failure);
+ return umem_failure;
+ }
+
+ return ret;
+}
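ice_xsk_umem_setup() is the hook the XDP control path calls when userspace binds or unbinds an AF_XDP socket to a queue; a NULL umem means disable. A hedged sketch of how an ndo_bpf handler typically routes the request to it (the actual ice dispatch lives elsewhere in this series and may differ):

    static int ice_xdp_dispatch_sketch(struct ice_vsi *vsi,
                                       struct netdev_bpf *xdp)
    {
            switch (xdp->command) {
            case XDP_SETUP_XSK_UMEM:
                    /* xdp->xsk.umem == NULL requests a disable */
                    return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
                                              xdp->xsk.queue_id);
            default:
                    return -EINVAL;
            }
    }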
+
+/**
+ * ice_zca_free - Callback for MEM_TYPE_ZERO_COPY allocations
+ * @zca: zero-copy allocator
+ * @handle: Buffer handle
+ */
+void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle)
+{
+ struct ice_rx_buf *rx_buf;
+ struct ice_ring *rx_ring;
+ struct xdp_umem *umem;
+ u64 hr, mask;
+ u16 nta;
+
+ rx_ring = container_of(zca, struct ice_ring, zca);
+ umem = rx_ring->xsk_umem;
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ mask = umem->chunk_mask;
+
+ nta = rx_ring->next_to_alloc;
+ rx_buf = &rx_ring->rx_buf[nta];
+
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ handle &= mask;
+
+ rx_buf->dma = xdp_umem_get_dma(umem, handle);
+ rx_buf->dma += hr;
+
+ rx_buf->addr = xdp_umem_get_data(umem, handle);
+ rx_buf->addr += hr;
+
+ rx_buf->handle = (u64)handle + umem->headroom;
+}
+
+/**
+ * ice_alloc_buf_fast_zc - Retrieve buffer address from XDP umem
+ * @rx_ring: ring with an xdp_umem bound to it
+ * @rx_buf: buffer to which xsk page address will be assigned
+ *
+ * This function allocates an Rx buffer in the hot path.
+ * The buffer can come from fill queue or recycle queue.
+ *
+ * Returns true if an assignment was successful, false if not.
+ */
+static __always_inline bool
+ice_alloc_buf_fast_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ void *addr = rx_buf->addr;
+ u64 handle, hr;
+
+ if (addr) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
+
+ if (!xsk_umem_peek_addr(umem, &handle)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
+
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ rx_buf->dma = xdp_umem_get_dma(umem, handle);
+ rx_buf->dma += hr;
+
+ rx_buf->addr = xdp_umem_get_data(umem, handle);
+ rx_buf->addr += hr;
+
+ rx_buf->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr(umem);
+ return true;
+}
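The fast-path allocator leans on the two-step fill queue API: xsk_umem_peek_addr() exposes the next address without consuming it, and only xsk_umem_discard_addr() advances the queue once the driver has taken ownership. The contract condensed into a sketch (use_buffer() is a hypothetical consumer):

    u64 handle;

    if (xsk_umem_peek_addr(umem, &handle)) {
            /* handle still sits on the fill queue at this point */
            use_buffer(xdp_umem_get_data(umem, handle));
            xsk_umem_discard_addr(umem); /* now it is consumed */
    }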
+
+/**
+ * ice_alloc_buf_slow_zc - Retrieve buffer address from XDP umem
+ * @rx_ring: ring with an xdp_umem bound to it
+ * @rx_buf: buffer to which xsk page address will be assigned
+ *
+ * This function allocates an Rx buffer in the slow path.
+ * The buffer can come from fill queue or recycle queue.
+ *
+ * Returns true if an assignment was successful, false if not.
+ */
+static __always_inline bool
+ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ u64 handle, headroom;
+
+ if (!xsk_umem_peek_addr_rq(umem, &handle)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
+
+ handle &= umem->chunk_mask;
+ headroom = umem->headroom + XDP_PACKET_HEADROOM;
+
+ rx_buf->dma = xdp_umem_get_dma(umem, handle);
+ rx_buf->dma += headroom;
+
+ rx_buf->addr = xdp_umem_get_data(umem, handle);
+ rx_buf->addr += headroom;
+
+ rx_buf->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr_rq(umem);
+ return true;
+}
+
+/**
+ * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ * @alloc: the function pointer to call for allocation
+ *
+ * This function allocates a number of Rx buffers from the fill ring
+ * or the internal recycle mechanism and places them on the Rx ring.
+ *
+ * Returns false if all allocations were successful, true if any fail.
+ */
+static bool
+ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
+ bool alloc(struct ice_ring *, struct ice_rx_buf *))
+{
+ union ice_32b_rx_flex_desc *rx_desc;
+ u16 ntu = rx_ring->next_to_use;
+ struct ice_rx_buf *rx_buf;
+ bool ret = false;
+
+ if (!count)
+ return false;
+
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
+ rx_buf = &rx_ring->rx_buf[ntu];
+
+ do {
+ if (!alloc(rx_ring, rx_buf)) {
+ ret = true;
+ break;
+ }
+
+ dma_sync_single_range_for_device(rx_ring->dev, rx_buf->dma, 0,
+ rx_ring->rx_buf_len,
+ DMA_BIDIRECTIONAL);
+
+ rx_desc->read.pkt_addr = cpu_to_le64(rx_buf->dma);
+ rx_desc->wb.status_error0 = 0;
+
+ rx_desc++;
+ rx_buf++;
+ ntu++;
+
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ rx_buf = rx_ring->rx_buf;
+ ntu = 0;
+ }
+ } while (--count);
+
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
+
+ return ret;
+}
+
+/**
+ * ice_alloc_rx_bufs_fast_zc - allocate zero copy bufs in the hot path
+ * @rx_ring: Rx ring
+ * @count: number of bufs to allocate
+ *
+ * Returns false on success, true on failure.
+ */
+static bool ice_alloc_rx_bufs_fast_zc(struct ice_ring *rx_ring, u16 count)
+{
+ return ice_alloc_rx_bufs_zc(rx_ring, count,
+ ice_alloc_buf_fast_zc);
+}
+
+/**
+ * ice_alloc_rx_bufs_slow_zc - allocate zero copy bufs in the slow path
+ * @rx_ring: Rx ring
+ * @count: number of bufs to allocate
+ *
+ * Returns false on success, true on failure.
+ */
+bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count)
+{
+ return ice_alloc_rx_bufs_zc(rx_ring, count,
+ ice_alloc_buf_slow_zc);
+}
+
+/**
+ * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
+ * @rx_ring: Rx ring
+ */
+static void ice_bump_ntc(struct ice_ring *rx_ring)
+{
+ int ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(ICE_RX_DESC(rx_ring, ntc));
+}
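ice_bump_ntc() uses the compare-and-reset idiom rather than a modulo, which avoids a division on the hot path, and prefetches the next descriptor to hide its cache miss. The wrap as a generic helper (illustrative only, not driver code):

    static inline u32 ring_idx_next(u32 idx, u32 count)
    {
            idx++;
            return (idx < count) ? idx : 0; /* predictable branch, no div */
    }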
+
+/**
+ * ice_get_rx_buf_zc - Fetch the current Rx buffer
+ * @rx_ring: Rx ring
+ * @size: size of a buffer
+ *
+ * This function returns the current, received Rx buffer and does
+ * DMA synchronization.
+ *
+ * Returns a pointer to the received Rx buffer.
+ */
+static struct ice_rx_buf *ice_get_rx_buf_zc(struct ice_ring *rx_ring, int size)
+{
+ struct ice_rx_buf *rx_buf;
+
+ rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+
+ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 0,
+ size, DMA_BIDIRECTIONAL);
+
+ return rx_buf;
+}
+
+/**
+ * ice_reuse_rx_buf_zc - reuse an Rx buffer
+ * @rx_ring: Rx ring
+ * @old_buf: The buffer to recycle
+ *
+ * This function recycles a finished Rx buffer, and places it on the recycle
+ * queue (next_to_alloc).
+ */
+static void
+ice_reuse_rx_buf_zc(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
+{
+ unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
+ u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+ u16 nta = rx_ring->next_to_alloc;
+ struct ice_rx_buf *new_buf;
+
+ new_buf = &rx_ring->rx_buf[nta++];
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ new_buf->dma = old_buf->dma & mask;
+ new_buf->dma += hr;
+
+ new_buf->addr = (void *)((unsigned long)old_buf->addr & mask);
+ new_buf->addr += hr;
+
+ new_buf->handle = old_buf->handle & mask;
+ new_buf->handle += rx_ring->xsk_umem->headroom;
+
+ old_buf->addr = NULL;
+}
+
+/**
+ * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
+ * @rx_ring: Rx ring
+ * @rx_buf: zero-copy Rx buffer
+ * @xdp: XDP buffer
+ *
+ * This function allocates a new skb from a zero-copy Rx buffer.
+ *
+ * Returns the skb on success, NULL on failure.
+ */
+static struct sk_buff *
+ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ unsigned int datasize_hard = xdp->data_end -
+ xdp->data_hard_start;
+ struct sk_buff *skb;
+
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ ice_reuse_rx_buf_zc(rx_ring, rx_buf);
+
+ return skb;
+}
+
+/**
+ * ice_run_xdp_zc - Executes an XDP program in zero-copy path
+ * @rx_ring: Rx ring
+ * @xdp: xdp_buff used as input to the XDP program
+ *
+ * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
+ */
+static int
+ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
+{
+ int err, result = ICE_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ struct ice_ring *xdp_ring;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!xdp_prog) {
+ rcu_read_unlock();
+ return ICE_XDP_PASS;
+ }
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ xdp->handle += xdp->data - xdp->data_hard_start;
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
+ result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fallthrough -- not supported action */
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping frame */
+ case XDP_DROP:
+ result = ICE_XDP_CONSUMED;
+ break;
+ }
+
+ rcu_read_unlock();
+ return result;
+}
+
+/**
+ * ice_clean_rx_irq_zc - consumes packets from the hardware ring
+ * @rx_ring: AF_XDP Rx ring
+ * @budget: NAPI budget
+ *
+ * Returns number of processed packets on success, remaining budget on failure.
+ */
+int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int xdp_xmit = 0;
+ struct xdp_buff xdp;
+ bool failure = false;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
+
+ while (likely(total_rx_packets < (unsigned int)budget)) {
+ union ice_32b_rx_flex_desc *rx_desc;
+ unsigned int size, xdp_res = 0;
+ struct ice_rx_buf *rx_buf;
+ struct sk_buff *skb;
+ u16 stat_err_bits;
+ u16 vlan_tag = 0;
+ u8 rx_ptype;
+
+ if (cleaned_count >= ICE_RX_BUF_WRITE) {
+ failure |= ice_alloc_rx_bufs_fast_zc(rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
+ if (!ice_test_staterr(rx_desc, stat_err_bits))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we have
+ * verified the descriptor has been written back.
+ */
+ dma_rmb();
+
+ size = le16_to_cpu(rx_desc->wb.pkt_len) &
+ ICE_RX_FLX_DESC_PKT_LEN_M;
+ if (!size)
+ break;
+
+ rx_buf = ice_get_rx_buf_zc(rx_ring, size);
+ if (!rx_buf->addr)
+ break;
+
+ xdp.data = rx_buf->addr;
+ xdp.data_meta = xdp.data;
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+ xdp.data_end = xdp.data + size;
+ xdp.handle = rx_buf->handle;
+
+ xdp_res = ice_run_xdp_zc(rx_ring, &xdp);
+ if (xdp_res) {
+ if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
+ rx_buf->addr = NULL;
+ } else {
+ ice_reuse_rx_buf_zc(rx_ring, rx_buf);
+ }
+
+ total_rx_bytes += size;
+ total_rx_packets++;
+ cleaned_count++;
+
+ ice_bump_ntc(rx_ring);
+ continue;
+ }
+
+ /* XDP_PASS path */
+ skb = ice_construct_skb_zc(rx_ring, rx_buf, &xdp);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buf_failed++;
+ break;
+ }
+
+ cleaned_count++;
+ ice_bump_ntc(rx_ring);
+
+ if (eth_skb_pad(skb)) {
+ skb = NULL;
+ continue;
+ }
+
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
+ if (ice_test_staterr(rx_desc, stat_err_bits))
+ vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
+
+ rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
+ ICE_RX_FLEX_DESC_PTYPE_M;
+
+ ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+ ice_receive_skb(rx_ring, skb, vlan_tag);
+ }
+
+ ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
+
+ return failure ? budget : (int)total_rx_packets;
+}
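The ordering rule in the loop above is worth restating: the descriptor's DD (descriptor done) bit must be read before any other writeback field, with dma_rmb() fencing the two reads. A minimal sketch of that contract, assuming the flex descriptor layout used here:

    static bool ice_rx_desc_done(const union ice_32b_rx_flex_desc *rx_desc)
    {
            u16 dd = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);

            if (!(le16_to_cpu(rx_desc->wb.status_error0) & dd))
                    return false;
            dma_rmb(); /* no other rx_desc reads may be hoisted above this */
            return true;
    }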
+
+/**
+ * ice_xmit_zc - Consume entries from the AF_XDP Tx queue and post them to HW
+ * @xdp_ring: XDP Tx ring
+ * @budget: max number of frames to xmit
+ *
+ * Returns true if transmission is done, i.e. the budget was not exhausted.
+ */
+static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
+{
+ struct ice_tx_desc *tx_desc = NULL;
+ bool work_done = true;
+ struct xdp_desc desc;
+ dma_addr_t dma;
+
+ while (likely(budget-- > 0)) {
+ struct ice_tx_buf *tx_buf;
+
+ if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
+ xdp_ring->tx_stats.tx_busy++;
+ work_done = false;
+ break;
+ }
+
+ tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
+
+ if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ break;
+
+ dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
+
+ dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
+ DMA_BIDIRECTIONAL);
+
+ tx_buf->bytecount = desc.len;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD,
+ 0, desc.len, 0);
+
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
+ }
+
+ if (tx_desc) {
+ ice_xdp_ring_update_tail(xdp_ring);
+ xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ }
+
+ return budget > 0 && work_done;
+}
+
+/**
+ * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
+ * @xdp_ring: XDP Tx ring
+ * @tx_buf: Tx buffer to clean
+ */
+static void
+ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+{
+ xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
+ dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+}
+
+/**
+ * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
+ * @xdp_ring: XDP Tx ring
+ * @budget: NAPI budget
+ *
+ * Returns true if cleanup/transmission is done.
+ */
+bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
+{
+ int total_packets = 0, total_bytes = 0;
+ s16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ bool xmit_done = true;
+ u32 xsk_frames = 0;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, ntc);
+ tx_buf = &xdp_ring->tx_buf[ntc];
+ ntc -= xdp_ring->count;
+
+ do {
+ if (!(tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ total_bytes += tx_buf->bytecount;
+ total_packets++;
+
+ if (tx_buf->raw_buf) {
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ tx_buf->raw_buf = NULL;
+ } else {
+ xsk_frames++;
+ }
+
+ tx_desc->cmd_type_offset_bsz = 0;
+ tx_buf++;
+ tx_desc++;
+ ntc++;
+
+ if (unlikely(!ntc)) {
+ ntc -= xdp_ring->count;
+ tx_buf = xdp_ring->tx_buf;
+ tx_desc = ICE_TX_DESC(xdp_ring, 0);
+ }
+
+ prefetch(tx_desc);
+
+ } while (likely(--budget));
+
+ ntc += xdp_ring->count;
+ xdp_ring->next_to_clean = ntc;
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+
+ ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
+ xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
+
+ return budget > 0 && xmit_done;
+}
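ice_clean_tx_irq_zc() walks the ring with next_to_clean biased by -count: the index stays negative, so reaching zero is the wrap signal, and count is added back exactly once at the end. The arithmetic in isolation (a sketch, valid as long as the ring size fits in an s16):

    static u16 biased_ntc_next(s16 *ntc, u16 count)
    {
            (*ntc)++;               /* *ntc runs from -count up to -1 */
            if (!*ntc)              /* hit 0: just walked past the end */
                    *ntc -= count;  /* rewind to the start of the ring */
            return (u16)(*ntc + count); /* real 0..count-1 index */
    }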
+
+/**
+ * ice_xsk_wakeup - Implements ndo_xsk_wakeup
+ * @netdev: net_device
+ * @queue_id: queue to wake up
+ * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
+ *
+ * Returns negative on error, zero otherwise.
+ */
+int
+ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
+ u32 __always_unused flags)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_q_vector *q_vector;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_ring *ring;
+
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return -ENETDOWN;
+
+ if (!ice_is_xdp_ena_vsi(vsi))
+ return -ENXIO;
+
+ if (queue_id >= vsi->num_txq)
+ return -ENXIO;
+
+ if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ return -ENXIO;
+
+ ring = vsi->xdp_rings[queue_id];
+
+ /* The idea here is that if NAPI is running, mark a miss, so
+ * it will run again. If not, trigger an interrupt and
+ * schedule the NAPI from interrupt context. If NAPI would be
+ * scheduled here, the interrupt affinity would not be
+ * honored.
+ */
+ q_vector = ring->q_vector;
+ if (!napi_if_scheduled_mark_missed(&q_vector->napi))
+ ice_trigger_sw_intr(&vsi->back->hw, q_vector);
+
+ return 0;
+}
+
+/**
+ * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP UMEM attached
+ * @vsi: VSI to be checked
+ *
+ * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ */
+bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->xsk_umems)
+ return false;
+
+ for (i = 0; i < vsi->num_xsk_umems; i++) {
+ if (vsi->xsk_umems[i])
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_xsk_clean_rx_ring - clean UMEM queues connected to a given Rx ring
+ * @rx_ring: ring to be cleaned
+ */
+void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
+{
+ u16 i;
+
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+
+ if (!rx_buf->addr)
+ continue;
+
+ xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_buf->handle);
+ rx_buf->addr = NULL;
+ }
+}
+
+/**
+ * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its UMEM queues
+ * @xdp_ring: XDP Tx ring
+ */
+void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
+{
+ u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
+ u32 xsk_frames = 0;
+
+ while (ntc != ntu) {
+ struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
+
+ if (tx_buf->raw_buf)
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ else
+ xsk_frames++;
+
+ tx_buf->raw_buf = NULL;
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
new file mode 100644
index 000000000000..3479e1de98fe
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_XSK_H_
+#define _ICE_XSK_H_
+#include "ice_txrx.h"
+#include "ice.h"
+
+struct ice_vsi;
+
+#ifdef CONFIG_XDP_SOCKETS
+int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
+void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle);
+int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
+bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
+int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
+bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count);
+bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
+void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
+void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
+#else
+static inline int
+ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
+ struct xdp_umem __always_unused *umem,
+ u16 __always_unused qid)
+{
+ return -ENOTSUPP;
+}
+
+static inline void
+ice_zca_free(struct zero_copy_allocator __always_unused *zca,
+ unsigned long __always_unused handle)
+{
+}
+
+static inline int
+ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
+ int __always_unused budget)
+{
+ return 0;
+}
+
+static inline bool
+ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
+ int __always_unused budget)
+{
+ return false;
+}
+
+static inline bool
+ice_alloc_rx_bufs_slow_zc(struct ice_ring __always_unused *rx_ring,
+ u16 __always_unused count)
+{
+ return false;
+}
+
+static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
+{
+ return false;
+}
+
+static inline int
+ice_xsk_wakeup(struct net_device __always_unused *netdev,
+ u32 __always_unused queue_id, u32 __always_unused flags)
+{
+ return -ENOTSUPP;
+}
+
+#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
+#define ice_xsk_clean_xdp_ring(xdp_ring) do {} while (0)
+#endif /* CONFIG_XDP_SOCKETS */
+#endif /* !_ICE_XSK_H_ */
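The header follows the standard kernel pattern for compiling a feature out: real prototypes under CONFIG_XDP_SOCKETS, static inline no-op stubs otherwise, so callers never need #ifdefs of their own. A minimal analogue of the pattern (CONFIG_FOO, struct foo and foo_do_work() are hypothetical):

    struct foo;

    #ifdef CONFIG_FOO
    int foo_do_work(struct foo *f);
    #else
    static inline int foo_do_work(struct foo *f)
    {
            return -ENOTSUPP; /* feature compiled out */
    }
    #endif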
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 48a40e4132f9..98346eb064d5 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5677,8 +5677,8 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
* should have been handled by the upper layers.
*/
if (tx_ring->launchtime_enable) {
- ts = ns_to_timespec64(first->skb->tstamp);
- first->skb->tstamp = 0;
+ ts = ktime_to_timespec64(first->skb->tstamp);
+ first->skb->tstamp = ktime_set(0, 0);
context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
} else {
context_desc->seqnum_seed = 0;
@@ -6236,7 +6236,6 @@ static void igb_get_stats64(struct net_device *netdev,
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
/* adjust max frame to be at least the size of a standard frame */
@@ -6252,8 +6251,8 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
igb_down(adapter);
- dev_info(&pdev->dev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index fd3071f55bd3..c39e921757ba 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -521,6 +521,19 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests failing to enable both edges. */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
if (on) {
pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
rq->extts.index);
@@ -551,6 +564,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
if (on) {
pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 0f2b68f4bb0f..6003dc3ff5fd 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2437,8 +2437,8 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
ETH_FCS_LEN;
- dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 6105c6d1f3c9..9700527dd797 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -862,8 +862,8 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
* should have been handled by the upper layers.
*/
if (tx_ring->launchtime_enable) {
- ts = ns_to_timespec64(first->skb->tstamp);
- first->skb->tstamp = 0;
+ ts = ktime_to_timespec64(first->skb->tstamp);
+ first->skb->tstamp = ktime_set(0, 0);
context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32);
} else {
context_desc->launch_time = 0;
@@ -2272,7 +2272,6 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
{
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct igc_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
/* adjust max frame to be at least the size of a standard frame */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
@@ -2287,8 +2286,8 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
igc_down(adapter);
- dev_info(&pdev->dev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index cc3196ae5aea..fd9f5d41b594 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -832,9 +832,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
int xdp_count, int xdp_idx,
int rxr_count, int rxr_idx)
{
+ int node = dev_to_node(&adapter->pdev->dev);
struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring;
- int node = NUMA_NO_NODE;
int cpu = -1;
int ring_count;
u8 tcs = adapter->hw_tcs;
@@ -845,10 +845,8 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
if (rss_i > 1 && adapter->atr_sample_rate) {
- if (cpu_online(v_idx)) {
- cpu = v_idx;
- node = cpu_to_node(cpu);
- }
+ cpu = cpumask_local_spread(v_idx, node);
+ node = cpu_to_node(cpu);
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index b22baea9d39b..25c097cd8100 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6725,7 +6725,8 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
(new_mtu > ETH_DATA_LEN))
e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
- e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
@@ -8649,7 +8650,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
adapter->ptp_clock) {
- if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+ if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
&adapter->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 100ac89b345d..d6feaacfbf89 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -622,8 +622,6 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
if (tx_desc) {
ixgbe_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
- xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return !!budget && work_done;
@@ -691,12 +689,8 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
if (xsk_frames)
xsk_umem_complete_tx(umem, xsk_frames);
- if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) {
- if (tx_ring->next_to_clean == tx_ring->next_to_use)
- xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
- else
- xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
- }
+ if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
+ xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 82ea55ae5053..d5b644131cff 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2959,15 +2959,16 @@ static void set_params(struct mv643xx_eth_private *mp,
static int get_phy_mode(struct mv643xx_eth_private *mp)
{
struct device *dev = mp->dev->dev.parent;
- int iface = -1;
+ phy_interface_t iface;
+ int err;
if (dev->of_node)
- iface = of_get_phy_mode(dev->of_node);
+ err = of_get_phy_mode(dev->of_node, &iface);
/* Historical default if unspecified. We could also read/write
* the interface state in the PSC1
*/
- if (iface < 0)
+ if (!dev->of_node || err)
iface = PHY_INTERFACE_MODE_GMII;
return iface;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 8f9df6efda61..a06d109c9e80 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1846,7 +1846,6 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
struct mvneta_rx_queue *rxq,
gfp_t gfp_mask)
{
- enum dma_data_direction dma_dir;
dma_addr_t phys_addr;
struct page *page;
@@ -1856,9 +1855,6 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
return -ENOMEM;
phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
- dma_dir = page_pool_get_dma_dir(rxq->page_pool);
- dma_sync_single_for_device(pp->dev->dev.parent, phys_addr,
- MVNETA_MAX_RX_BUF_SIZE, dma_dir);
mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
return 0;
@@ -2097,7 +2093,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
err = xdp_do_redirect(pp->dev, xdp, prog);
if (err) {
ret = MVNETA_XDP_DROPPED;
- xdp_return_buff(xdp);
+ __page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data),
+ xdp->data_end - xdp->data_hard_start,
+ true);
} else {
ret = MVNETA_XDP_REDIR;
}
@@ -2106,7 +2105,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX)
- xdp_return_buff(xdp);
+ __page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data),
+ xdp->data_end - xdp->data_hard_start,
+ true);
break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -2115,8 +2117,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
trace_xdp_exception(pp->dev, prog, act);
/* fall through */
case XDP_DROP:
- page_pool_recycle_direct(rxq->page_pool,
- virt_to_head_page(xdp->data));
+ __page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data),
+ xdp->data_end - xdp->data_hard_start,
+ true);
ret = MVNETA_XDP_DROPPED;
break;
}
@@ -2154,7 +2158,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
prefetch(data);
xdp->data_hard_start = data;
- xdp->data = data + MVNETA_SKB_HEADROOM + MVNETA_MH_SIZE;
+ xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE;
xdp->data_end = xdp->data + data_len;
xdp_set_data_meta_invalid(xdp);
@@ -2219,7 +2223,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
/* refill descriptor with new buffer later */
skb_add_rx_frag(rxq->skb,
skb_shinfo(rxq->skb)->nr_frags,
- page, MVNETA_SKB_HEADROOM, data_len,
+ page, pp->rx_offset_correction, data_len,
PAGE_SIZE);
}
page_pool_release_page(rxq->page_pool, page);
@@ -3065,11 +3069,13 @@ static int mvneta_create_page_pool(struct mvneta_port *pp,
struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
struct page_pool_params pp_params = {
.order = 0,
- .flags = PP_FLAG_DMA_MAP,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = size,
.nid = cpu_to_node(0),
.dev = pp->dev->dev.parent,
.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = pp->rx_offset_correction,
+ .max_len = MVNETA_MAX_RX_BUF_SIZE,
};
int err;
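With PP_FLAG_DMA_SYNC_DEV plus .offset and .max_len, the page_pool core syncs only the region the hardware can write when a recycled page is handed back to the device, which is what lets the earlier mvneta_rx_refill() hunk drop its per-refill dma_sync_single_for_device() call. A hedged sketch of such a pool setup (the sizes and the device pointer are placeholders):

    struct page_pool_params params = {
            .order          = 0,
            .flags          = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
            .pool_size      = 256,                  /* illustrative */
            .nid            = NUMA_NO_NODE,
            .dev            = dev,                  /* assumed struct device * */
            .dma_dir        = DMA_FROM_DEVICE,
            .offset         = XDP_PACKET_HEADROOM,  /* start of HW-written area */
            .max_len        = 2048,                 /* max bytes HW may write */
    };
    struct page_pool *pool = page_pool_create(&params);

    if (IS_ERR(pool))
            return PTR_ERR(pool);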
@@ -4797,9 +4803,9 @@ static int mvneta_probe(struct platform_device *pdev)
struct phy *comphy;
const char *dt_mac_addr;
char hw_mac_addr[ETH_ALEN];
+ phy_interface_t phy_mode;
const char *mac_from;
int tx_csum_limit;
- int phy_mode;
int err;
int cpu;
@@ -4812,10 +4818,9 @@ static int mvneta_probe(struct platform_device *pdev)
if (dev->irq == 0)
return -EINVAL;
- phy_mode = of_get_phy_mode(dn);
- if (phy_mode < 0) {
+ err = of_get_phy_mode(dn, &phy_mode);
+ if (err) {
dev_err(&pdev->dev, "incorrect phy-mode\n");
- err = -EINVAL;
goto err_free_irq;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 711ada7139d3..fb34fbd62088 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -16,3 +16,12 @@ config OCTEONTX2_AF
Unit's admin function manager which manages all RVU HW resources
and provides a medium to other PF/VFs to configure HW. Should be
enabled for other RVU device drivers to work.
+
+config NDC_DIS_DYNAMIC_CACHING
+ bool "Disable caching of dynamic entries in NDC"
+ depends on OCTEONTX2_AF
+ default n
+ ---help---
+ This config option disables caching of dynamic entries such as NIX SQEs,
+ NPA stack pages, etc. in NDC. It also locks down NIX SQ/CQ/RQ/RSS and
+ NPA Aura/Pool contexts.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 06329acf9c2c..1b25948c662b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
octeontx2_mbox-y := mbox.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
- rvu_reg.o rvu_npc.o
+ rvu_reg.o rvu_npc.o rvu_debugfs.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 6d55e3d0b7ea..5ca788691911 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -138,6 +138,16 @@ void *cgx_get_pdata(int cgx_id)
}
EXPORT_SYMBOL(cgx_get_pdata);
+int cgx_get_cgxid(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -EINVAL;
+
+ return cgx->cgx_id;
+}
+
/* Ensure the required lock for event queue(where asynchronous events are
* posted) is acquired before calling this API. Else an asynchronous event(with
* latest link status) can reach the destination before this function returns
@@ -281,6 +291,35 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
}
EXPORT_SYMBOL(cgx_lmac_promisc_config);
+/* Enable or disable forwarding received pause frames to Tx block */
+void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx)
+ return;
+
+ if (enable) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
+}
+EXPORT_SYMBOL(cgx_lmac_enadis_rx_pause_fwding);
+
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
struct cgx *cgx = cgxd;
@@ -321,6 +360,27 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
}
EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
+int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg, last;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ last = cfg;
+ if (enable)
+ cfg |= DATA_PKT_TX_EN;
+ else
+ cfg &= ~DATA_PKT_TX_EN;
+
+ if (cfg != last)
+ cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+ return !!(last & DATA_PKT_TX_EN);
+}
+EXPORT_SYMBOL(cgx_lmac_tx_enable);
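Unlike cgx_lmac_rx_tx_enable(), cgx_lmac_tx_enable() returns the previous Tx-enable state, which lets a caller quiesce transmission and later restore exactly what was configured before. A hedged usage sketch (cgxd and lmac are placeholders; a real caller would also check for a negative return):

    int prev = cgx_lmac_tx_enable(cgxd, lmac, false); /* pause Tx */

    /* ... reconfigure the link while Tx is quiesced ... */

    cgx_lmac_tx_enable(cgxd, lmac, prev); /* restore the prior state */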
+
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 206dc5dc1df8..9343bf39cfac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 CGX driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 CGX driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -56,6 +56,11 @@
#define CGXX_GMP_PCS_MRX_CTL 0x30000
#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
+#define CGXX_SMUX_RX_FRM_CTL 0x20020
+#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
+#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
+
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
#define CGX_CMD_TIMEOUT 2200 /* msecs */
@@ -63,6 +68,11 @@
#define CGX_NVEC 37
#define CGX_LMAC_FWI 0
+enum cgx_nix_stat_type {
+ NIX_STATS_RX,
+ NIX_STATS_TX,
+};
+
enum LMAC_TYPE {
LMAC_MODE_SGMII = 0,
LMAC_MODE_XAUI = 1,
@@ -96,6 +106,7 @@ struct cgx_event_cb {
extern struct pci_driver cgx_driver;
int cgx_get_cgxcnt_max(void);
+int cgx_get_cgxid(void *cgxd);
int cgx_get_lmac_cnt(void *cgxd);
void *cgx_get_pdata(int cgx_id);
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
@@ -104,9 +115,11 @@ int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
+int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
+void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index fb3ba4968a9b..473d9751601f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 CGX driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 CGX driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index e332e82fc066..784207bae5f8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -196,4 +196,20 @@ enum nix_scheduler {
#define DEFAULT_RSS_CONTEXT_GROUP 0
#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */
+/* NDC info */
+enum ndc_idx_e {
+ NIX0_RX = 0x0,
+ NIX0_TX = 0x1,
+ NPA0_U = 0x2,
+};
+
+enum ndc_ctype_e {
+ CACHING = 0x0,
+ BYPASS = 0x1,
+};
+
+#define NDC_MAX_PORT 6
+#define NDC_READ_TRANS 0
+#define NDC_WRITE_TRANS 1
+
#endif /* COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index d6f9ed8ea966..387e33fa417a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -19,17 +19,20 @@ static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
+ void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
- tx_hdr = mdev->mbase + mbox->tx_start;
- rx_hdr = mdev->mbase + mbox->rx_start;
+ tx_hdr = hw_mbase + mbox->tx_start;
+ rx_hdr = hw_mbase + mbox->rx_start;
spin_lock(&mdev->mbox_lock);
mdev->msg_size = 0;
mdev->rsp_size = 0;
tx_hdr->num_msgs = 0;
+ tx_hdr->msg_size = 0;
rx_hdr->num_msgs = 0;
+ rx_hdr->msg_size = 0;
spin_unlock(&mdev->mbox_lock);
}
EXPORT_SYMBOL(otx2_mbox_reset);
@@ -133,16 +136,17 @@ EXPORT_SYMBOL(otx2_mbox_init);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
- int timeout = 0, sleep = 1;
+ struct device *sender = &mbox->pdev->dev;
- while (mdev->num_msgs != mdev->msgs_acked) {
- msleep(sleep);
- timeout += sleep;
- if (timeout >= MBOX_RSP_TIMEOUT)
- return -EIO;
+ while (!time_after(jiffies, timeout)) {
+ if (mdev->num_msgs == mdev->msgs_acked)
+ return 0;
+ usleep_range(800, 1000);
}
- return 0;
+ dev_dbg(sender, "timed out while waiting for rsp\n");
+ return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
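The rewritten wait loop replaces a hand-counted msleep() budget with the canonical jiffies deadline pattern, which stays correct even when the individual sleeps are imprecise. The shape of the pattern in isolation (done() stands in for the num_msgs == msgs_acked check above):

    unsigned long deadline = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);

    while (!time_after(jiffies, deadline)) {
            if (done())
                    return 0;
            usleep_range(800, 1000); /* poll roughly once per millisecond */
    }
    return -EIO; /* deadline passed without a response */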
@@ -162,13 +166,25 @@ EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
+ void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
- tx_hdr = mdev->mbase + mbox->tx_start;
- rx_hdr = mdev->mbase + mbox->rx_start;
+ tx_hdr = hw_mbase + mbox->tx_start;
+ rx_hdr = hw_mbase + mbox->rx_start;
+
+ /* If a bounce buffer is in use, copy mbox messages from the
+ * bounce buffer to the hw mbox memory.
+ */
+ if (mdev->mbase != hw_mbase)
+ memcpy(hw_mbase + mbox->tx_start + msgs_offset,
+ mdev->mbase + mbox->tx_start + msgs_offset,
+ mdev->msg_size);
spin_lock(&mdev->mbox_lock);
+
+ tx_hdr->msg_size = mdev->msg_size;
+
/* Reset header for next messages */
mdev->msg_size = 0;
mdev->rsp_size = 0;
@@ -215,7 +231,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
/* Clear the whole msg region */
- memset(msghdr, 0, sizeof(*msghdr) + size);
+ memset(msghdr, 0, size);
/* Init message header with reset values */
msghdr->ver = OTX2_MBOX_VERSION;
mdev->msg_size += size;
@@ -236,8 +252,10 @@ struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
u16 msgs;
+ spin_lock(&mdev->mbox_lock);
+
if (mdev->num_msgs != mdev->msgs_acked)
- return ERR_PTR(-ENODEV);
+ goto error;
for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
struct mbox_msghdr *pmsg = mdev->mbase + imsg;
@@ -245,18 +263,55 @@ struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
if (msg == pmsg) {
if (pmsg->id != prsp->id)
- return ERR_PTR(-ENODEV);
+ goto error;
+ spin_unlock(&mdev->mbox_lock);
return prsp;
}
- imsg = pmsg->next_msgoff;
- irsp = prsp->next_msgoff;
+ imsg = mbox->tx_start + pmsg->next_msgoff;
+ irsp = mbox->rx_start + prsp->next_msgoff;
}
+error:
+ spin_unlock(&mdev->mbox_lock);
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(otx2_mbox_get_rsp);
+int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
+{
+ unsigned long ireq = mbox->tx_start + msgs_offset;
+ unsigned long irsp = mbox->rx_start + msgs_offset;
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ int rc = -ENODEV;
+ u16 msgs;
+
+ spin_lock(&mdev->mbox_lock);
+
+ if (mdev->num_msgs != mdev->msgs_acked)
+ goto exit;
+
+ for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
+ struct mbox_msghdr *preq = mdev->mbase + ireq;
+ struct mbox_msghdr *prsp = mdev->mbase + irsp;
+
+ if (preq->id != prsp->id)
+ goto exit;
+ if (prsp->rc) {
+ rc = prsp->rc;
+ goto exit;
+ }
+
+ ireq = mbox->tx_start + preq->next_msgoff;
+ irsp = mbox->rx_start + prsp->next_msgoff;
+ }
+ rc = 0;
+exit:
+ spin_unlock(&mdev->mbox_lock);
+ return rc;
+}
+EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);
+
int
otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 76a4575d18ff..a589748f1240 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -36,7 +36,7 @@
#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
-#define MBOX_RSP_TIMEOUT 1000 /* in ms, Time to wait for mbox response */
+#define MBOX_RSP_TIMEOUT 2000 /* Time(ms) to wait for mbox response */
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
@@ -75,6 +75,7 @@ struct otx2_mbox {
/* Header which preceeds all mbox messages */
struct mbox_hdr {
+ u64 msg_size; /* Total msgs size embedded */
u16 num_msgs; /* No of msgs embedded */
};
@@ -103,6 +104,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
int size, int size_rsp);
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *msg);
+int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid);
int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
u16 pcifunc, u16 id);
bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
@@ -125,6 +127,7 @@ M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
+M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@@ -300,6 +303,12 @@ struct msix_offset_rsp {
u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
};
+struct get_hw_cap_rsp {
+ struct mbox_msghdr hdr;
+ u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
+ u8 nix_shaping; /* Is shaping and coloring supported */
+};
+
/* CGX mbox message formats */
struct cgx_stats_rsp {
@@ -352,6 +361,7 @@ struct npa_lf_alloc_req {
int node;
int aura_sz; /* No of auras */
u32 nr_pools; /* No of pools */
+ u64 way_mask;
};
struct npa_lf_alloc_rsp {
@@ -442,6 +452,7 @@ struct nix_lf_alloc_req {
u16 npa_func;
u16 sso_func;
u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
+ u64 way_mask;
};
struct nix_lf_alloc_rsp {
@@ -512,6 +523,9 @@ struct nix_txsch_alloc_rsp {
/* Scheduler queue list allocated at each level */
u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u8 aggr_level; /* Traffic aggregation scheduler level */
+ u8 aggr_lvl_rr_prio; /* Aggregation lvl's RR_PRIO config */
+ u8 link_cfg_lvl; /* LINKX_CFG CSRs mapped to TL3 or TL2's index ? */
};
struct nix_txsch_free_req {
@@ -578,6 +592,18 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_TCP BIT(3)
#define NIX_FLOW_KEY_TYPE_UDP BIT(4)
#define NIX_FLOW_KEY_TYPE_SCTP BIT(5)
+#define NIX_FLOW_KEY_TYPE_NVGRE BIT(6)
+#define NIX_FLOW_KEY_TYPE_VXLAN BIT(7)
+#define NIX_FLOW_KEY_TYPE_GENEVE BIT(8)
+#define NIX_FLOW_KEY_TYPE_ETH_DMAC BIT(9)
+#define NIX_FLOW_KEY_TYPE_IPV6_EXT BIT(10)
+#define NIX_FLOW_KEY_TYPE_GTPU BIT(11)
+#define NIX_FLOW_KEY_TYPE_INNR_IPV4 BIT(12)
+#define NIX_FLOW_KEY_TYPE_INNR_IPV6 BIT(13)
+#define NIX_FLOW_KEY_TYPE_INNR_TCP BIT(14)
+#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
+#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
+#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 8d6d90fdfb73..3803af9231c6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -27,26 +27,45 @@ enum NPC_LID_E {
enum npc_kpu_la_ltype {
NPC_LT_LA_8023 = 1,
NPC_LT_LA_ETHER,
+ NPC_LT_LA_IH_NIX_ETHER,
+ NPC_LT_LA_IH_8_ETHER,
+ NPC_LT_LA_IH_4_ETHER,
+ NPC_LT_LA_IH_2_ETHER,
+ NPC_LT_LA_HIGIG2_ETHER,
+ NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_LT_LA_CUSTOM0 = 0xE,
+ NPC_LT_LA_CUSTOM1 = 0xF,
};
enum npc_kpu_lb_ltype {
NPC_LT_LB_ETAG = 1,
NPC_LT_LB_CTAG,
- NPC_LT_LB_STAG,
+ NPC_LT_LB_STAG_QINQ,
NPC_LT_LB_BTAG,
- NPC_LT_LB_QINQ,
NPC_LT_LB_ITAG,
+ NPC_LT_LB_DSA,
+ NPC_LT_LB_DSA_VLAN,
+ NPC_LT_LB_EDSA,
+ NPC_LT_LB_EDSA_VLAN,
+ NPC_LT_LB_EXDSA,
+ NPC_LT_LB_EXDSA_VLAN,
+ NPC_LT_LB_CUSTOM0 = 0xE,
+ NPC_LT_LB_CUSTOM1 = 0xF,
};
enum npc_kpu_lc_ltype {
NPC_LT_LC_IP = 1,
+ NPC_LT_LC_IP_OPT,
NPC_LT_LC_IP6,
+ NPC_LT_LC_IP6_EXT,
NPC_LT_LC_ARP,
NPC_LT_LC_RARP,
NPC_LT_LC_MPLS,
NPC_LT_LC_NSH,
NPC_LT_LC_PTP,
NPC_LT_LC_FCOE,
+ NPC_LT_LC_CUSTOM0 = 0xE,
+ NPC_LT_LC_CUSTOM1 = 0xF,
};
/* Don't modify Ltypes upto SCTP, otherwise it will
@@ -57,49 +76,67 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_UDP,
NPC_LT_LD_ICMP,
NPC_LT_LD_SCTP,
- NPC_LT_LD_IGMP,
NPC_LT_LD_ICMP6,
+ NPC_LT_LD_IGMP = 8,
NPC_LT_LD_ESP,
NPC_LT_LD_AH,
NPC_LT_LD_GRE,
- NPC_LT_LD_GRE_MPLS,
- NPC_LT_LD_GRE_NSH,
- NPC_LT_LD_TU_MPLS,
+ NPC_LT_LD_NVGRE,
+ NPC_LT_LD_NSH,
+ NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_LT_LD_CUSTOM0 = 0xE,
+ NPC_LT_LD_CUSTOM1 = 0xF,
};
enum npc_kpu_le_ltype {
- NPC_LT_LE_TU_ETHER = 1,
- NPC_LT_LE_TU_PPP,
- NPC_LT_LE_TU_MPLS_IN_NSH,
- NPC_LT_LE_TU_3RD_NSH,
+ NPC_LT_LE_VXLAN = 1,
+ NPC_LT_LE_GENEVE,
+ NPC_LT_LE_GTPU = 4,
+ NPC_LT_LE_VXLANGPE,
+ NPC_LT_LE_GTPC,
+ NPC_LT_LE_NSH,
+ NPC_LT_LE_TU_MPLS_IN_GRE,
+ NPC_LT_LE_TU_NSH_IN_GRE,
+ NPC_LT_LE_TU_MPLS_IN_UDP,
+ NPC_LT_LE_CUSTOM0 = 0xE,
+ NPC_LT_LE_CUSTOM1 = 0xF,
};
enum npc_kpu_lf_ltype {
- NPC_LT_LF_TU_IP = 1,
- NPC_LT_LF_TU_IP6,
- NPC_LT_LF_TU_ARP,
- NPC_LT_LF_TU_MPLS_IP,
- NPC_LT_LF_TU_MPLS_IP6,
- NPC_LT_LF_TU_MPLS_ETHER,
+ NPC_LT_LF_TU_ETHER = 1,
+ NPC_LT_LF_TU_PPP,
+ NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ NPC_LT_LF_TU_MPLS_IN_NSH,
+ NPC_LT_LF_TU_3RD_NSH,
+ NPC_LT_LF_CUSTOM0 = 0xE,
+ NPC_LT_LF_CUSTOM1 = 0xF,
};
enum npc_kpu_lg_ltype {
- NPC_LT_LG_TU_TCP = 1,
- NPC_LT_LG_TU_UDP,
- NPC_LT_LG_TU_SCTP,
- NPC_LT_LG_TU_ICMP,
- NPC_LT_LG_TU_IGMP,
- NPC_LT_LG_TU_ICMP6,
- NPC_LT_LG_TU_ESP,
- NPC_LT_LG_TU_AH,
+ NPC_LT_LG_TU_IP = 1,
+ NPC_LT_LG_TU_IP6,
+ NPC_LT_LG_TU_ARP,
+ NPC_LT_LG_TU_ETHER_IN_NSH,
+ NPC_LT_LG_CUSTOM0 = 0xE,
+ NPC_LT_LG_CUSTOM1 = 0xF,
};
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
+ */
enum npc_kpu_lh_ltype {
- NPC_LT_LH_TCP_DATA = 1,
- NPC_LT_LH_HTTP_DATA,
- NPC_LT_LH_HTTPS_DATA,
- NPC_LT_LH_PPTP_DATA,
- NPC_LT_LH_UDP_DATA,
+ NPC_LT_LH_TU_TCP = 1,
+ NPC_LT_LH_TU_UDP,
+ NPC_LT_LH_TU_ICMP,
+ NPC_LT_LH_TU_SCTP,
+ NPC_LT_LH_TU_ICMP6,
+ NPC_LT_LH_TU_IGMP = 8,
+ NPC_LT_LH_TU_ESP,
+ NPC_LT_LH_TU_AH,
+ NPC_LT_LH_CUSTOM0 = 0xE,
+ NPC_LT_LH_CUSTOM1 = 0xF,
};
struct npc_kpu_profile_cam {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index b2ce957605bb..aa2727e6211a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -11,6 +11,11 @@
#ifndef NPC_PROFILE_H
#define NPC_PROFILE_H
+#define NPC_KPU_PROFILE_VER 0x0000000100050000
+
+#define NPC_IH_W 0x8000
+#define NPC_IH_UTAG 0x2000
+
#define NPC_ETYPE_IP 0x0800
#define NPC_ETYPE_IP6 0x86dd
#define NPC_ETYPE_ARP 0x0806
@@ -27,6 +32,7 @@
#define NPC_ETYPE_TRANS_ETH_BR 0x6558
#define NPC_ETYPE_PPP 0x880b
#define NPC_ETYPE_NSH 0x894f
+#define NPC_ETYPE_DSA 0xdada
#define NPC_IPNH_HOP 0
#define NPC_IPNH_ICMP 1
@@ -44,13 +50,19 @@
#define NPC_IPNH_NONH 59
#define NPC_IPNH_DEST 60
#define NPC_IPNH_SCTP 132
+#define NPC_IPNH_MOBILITY 135
#define NPC_IPNH_MPLS 137
+#define NPC_IPNH_HOSTID 139
+#define NPC_IPNH_SHIM6 140
+#define NPC_UDP_PORT_PTP_E 319
+#define NPC_UDP_PORT_PTP_G 320
#define NPC_UDP_PORT_GTPC 2123
#define NPC_UDP_PORT_GTPU 2152
#define NPC_UDP_PORT_VXLAN 4789
#define NPC_UDP_PORT_VXLANGPE 4790
#define NPC_UDP_PORT_GENEVE 6081
+#define NPC_UDP_PORT_MPLS 6635
#define NPC_VXLANGPE_NP_IP 0x1
#define NPC_VXLANGPE_NP_IP6 0x2
@@ -72,11 +84,17 @@
#define NPC_MPLS_S 0x0100
+#define NPC_IP_TTL_MASK 0xff00
#define NPC_IP_VER_4 0x4000
#define NPC_IP_VER_6 0x6000
#define NPC_IP_VER_MASK 0xf000
#define NPC_IP_HDR_LEN_5 0x0500
#define NPC_IP_HDR_LEN_MASK 0x0f00
+#define NPC_IP_HDR_MF 0x2000
+#define NPC_IP_HDR_FRAGOFF 0x1fff
+
+#define NPC_IP6_HOP_MASK 0x00ff
+#define NPC_IP6_FRAG_FRAGOFF 0xfff8
#define NPC_GRE_F_CSUM (0x1 << 15)
#define NPC_GRE_F_ROUTE (0x1 << 14)
@@ -108,22 +126,44 @@
#define NPC_GTP_MT_G_PDU 0xff
#define NPC_GTP_MT_MASK 0xff
+#define NPC_TCP_FLAGS_FIN 0x0001
+#define NPC_TCP_FLAGS_SYN 0x0002
+#define NPC_TCP_FLAGS_RST 0x0004
+#define NPC_TCP_FLAGS_PSH 0x0008
+#define NPC_TCP_FLAGS_ACK 0x0010
+#define NPC_TCP_FLAGS_URG 0x0020
+#define NPC_TCP_FLAGS_MASK 0x003f
+
#define NPC_TCP_DATA_OFFSET_5 0x5000
#define NPC_TCP_DATA_OFFSET_MASK 0xf000
+#define NPC_DSA_EXTEND 0x1000
+#define NPC_DSA_EDSA 0x8000
+
enum npc_kpu_parser_state {
NPC_S_NA = 0,
NPC_S_KPU1_ETHER,
- NPC_S_KPU1_PKI,
+ NPC_S_KPU1_IH_NIX,
+ NPC_S_KPU1_IH,
+ NPC_S_KPU1_EXDSA,
+ NPC_S_KPU1_HIGIG2,
+ NPC_S_KPU1_IH_NIX_HIGIG2,
NPC_S_KPU2_CTAG,
+ NPC_S_KPU2_CTAG2,
NPC_S_KPU2_SBTAG,
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
NPC_S_KPU2_ITAG,
+ NPC_S_KPU2_PREHEADER,
+ NPC_S_KPU2_EXDSA,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
NPC_S_KPU3_ITAG,
+ NPC_S_KPU3_CTAG_C,
+ NPC_S_KPU3_STAG_C,
+ NPC_S_KPU3_QINQ_C,
+ NPC_S_KPU3_DSA,
NPC_S_KPU4_MPLS,
NPC_S_KPU4_NSH,
NPC_S_KPU5_IP,
@@ -136,7 +176,12 @@ enum npc_kpu_parser_state {
NPC_S_KPU5_MPLS_PL,
NPC_S_KPU5_NSH,
NPC_S_KPU6_IP6_EXT,
+ NPC_S_KPU6_IP6_HOP_DEST,
+ NPC_S_KPU6_IP6_ROUT,
+ NPC_S_KPU6_IP6_FRAG,
NPC_S_KPU7_IP6_EXT,
+ NPC_S_KPU7_IP6_ROUT,
+ NPC_S_KPU7_IP6_FRAG,
NPC_S_KPU8_TCP,
NPC_S_KPU8_UDP,
NPC_S_KPU8_SCTP,
@@ -146,16 +191,26 @@ enum npc_kpu_parser_state {
NPC_S_KPU8_GRE,
NPC_S_KPU8_ESP,
NPC_S_KPU8_AH,
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN,
- NPC_S_KPU9_TU_MPLS,
- NPC_S_KPU9_TU_NSH,
+ NPC_S_KPU9_TU_MPLS_IN_GRE,
+ NPC_S_KPU9_TU_MPLS_IN_NSH,
+ NPC_S_KPU9_TU_MPLS_IN_IP,
+ NPC_S_KPU9_TU_MPLS_IN_UDP,
+ NPC_S_KPU9_TU_NSH_IN_GRE,
+ NPC_S_KPU9_VXLAN,
+ NPC_S_KPU9_VXLANGPE,
+ NPC_S_KPU9_GENEVE,
+ NPC_S_KPU9_GTPC,
+ NPC_S_KPU9_GTPU,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE,
NPC_S_KPU10_TU_MPLS_PL,
NPC_S_KPU10_TU_MPLS,
- NPC_S_KPU10_TU_NSH,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE,
NPC_S_KPU11_TU_ETHER,
NPC_S_KPU11_TU_PPP,
NPC_S_KPU11_TU_MPLS_IN_NSH,
- NPC_S_KPU11_TU_3RD_NSH,
+ NPC_S_KPU11_TU_MPLS_PL,
+ NPC_S_KPU11_TU_MPLS,
+ NPC_S_KPU11_TU_ETHER_IN_NSH,
NPC_S_KPU12_TU_IP,
NPC_S_KPU12_TU_IP6,
NPC_S_KPU12_TU_ARP,
@@ -174,135 +229,172 @@ enum npc_kpu_parser_state {
NPC_S_KPU16_PPTP_DATA,
NPC_S_KPU16_TCP_DATA,
NPC_S_KPU16_UDP_DATA,
+ NPC_S_KPU16_UDP_PTP,
NPC_S_LAST /* has to be the last item */
};
-enum npc_kpu_parser_flag {
- NPC_F_NA = 0,
- NPC_F_PKI,
- NPC_F_PKI_VLAN,
- NPC_F_PKI_ETAG,
- NPC_F_PKI_ITAG,
- NPC_F_PKI_MPLS,
- NPC_F_PKI_NSH,
- NPC_F_ETYPE_UNK,
- NPC_F_ETHER_VLAN,
- NPC_F_ETHER_ETAG,
- NPC_F_ETHER_ITAG,
- NPC_F_ETHER_MPLS,
- NPC_F_ETHER_NSH,
- NPC_F_STAG_CTAG,
- NPC_F_STAG_CTAG_UNK,
- NPC_F_STAG_STAG_CTAG,
- NPC_F_STAG_STAG_STAG,
- NPC_F_QINQ_CTAG,
- NPC_F_QINQ_CTAG_UNK,
- NPC_F_QINQ_QINQ_CTAG,
- NPC_F_QINQ_QINQ_QINQ,
- NPC_F_BTAG_ITAG,
- NPC_F_BTAG_ITAG_STAG,
- NPC_F_BTAG_ITAG_CTAG,
- NPC_F_BTAG_ITAG_UNK,
- NPC_F_ETAG_CTAG,
- NPC_F_ETAG_BTAG_ITAG,
- NPC_F_ETAG_STAG,
- NPC_F_ETAG_QINQ,
- NPC_F_ETAG_ITAG,
- NPC_F_ETAG_ITAG_STAG,
- NPC_F_ETAG_ITAG_CTAG,
- NPC_F_ETAG_ITAG_UNK,
- NPC_F_ITAG_STAG_CTAG,
- NPC_F_ITAG_STAG,
- NPC_F_ITAG_CTAG,
- NPC_F_MPLS_4_LABELS,
- NPC_F_MPLS_3_LABELS,
- NPC_F_MPLS_2_LABELS,
- NPC_F_IP_HAS_OPTIONS,
- NPC_F_IP_IP_IN_IP,
- NPC_F_IP_6TO4,
- NPC_F_IP_MPLS_IN_IP,
- NPC_F_IP_UNK_PROTO,
- NPC_F_IP_IP_IN_IP_HAS_OPTIONS,
- NPC_F_IP_6TO4_HAS_OPTIONS,
- NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
- NPC_F_IP_UNK_PROTO_HAS_OPTIONS,
- NPC_F_IP6_HAS_EXT,
- NPC_F_IP6_TUN_IP6,
- NPC_F_IP6_MPLS_IN_IP,
- NPC_F_TCP_HAS_OPTIONS,
- NPC_F_TCP_HTTP,
- NPC_F_TCP_HTTPS,
- NPC_F_TCP_PPTP,
- NPC_F_TCP_UNK_PORT,
- NPC_F_TCP_HTTP_HAS_OPTIONS,
- NPC_F_TCP_HTTPS_HAS_OPTIONS,
- NPC_F_TCP_PPTP_HAS_OPTIONS,
- NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
- NPC_F_UDP_VXLAN,
- NPC_F_UDP_VXLAN_NOVNI,
- NPC_F_UDP_VXLAN_NOVNI_NSH,
- NPC_F_UDP_VXLANGPE,
- NPC_F_UDP_VXLANGPE_NSH,
- NPC_F_UDP_VXLANGPE_MPLS,
- NPC_F_UDP_VXLANGPE_NOVNI,
- NPC_F_UDP_VXLANGPE_NOVNI_NSH,
- NPC_F_UDP_VXLANGPE_NOVNI_MPLS,
- NPC_F_UDP_VXLANGPE_UNK,
- NPC_F_UDP_VXLANGPE_NONP,
- NPC_F_UDP_GTP_GTPC,
- NPC_F_UDP_GTP_GTPU_G_PDU,
- NPC_F_UDP_GTP_GTPU_UNK,
- NPC_F_UDP_UNK_PORT,
- NPC_F_UDP_GENEVE,
- NPC_F_UDP_GENEVE_OAM,
- NPC_F_UDP_GENEVE_CRI_OPT,
- NPC_F_UDP_GENEVE_OAM_CRI_OPT,
- NPC_F_GRE_NVGRE,
- NPC_F_GRE_HAS_SRE,
- NPC_F_GRE_HAS_CSUM,
- NPC_F_GRE_HAS_KEY,
- NPC_F_GRE_HAS_SEQ,
- NPC_F_GRE_HAS_CSUM_KEY,
- NPC_F_GRE_HAS_CSUM_SEQ,
- NPC_F_GRE_HAS_KEY_SEQ,
- NPC_F_GRE_HAS_CSUM_KEY_SEQ,
- NPC_F_GRE_HAS_ROUTE,
- NPC_F_GRE_UNK_PROTO,
- NPC_F_GRE_VER1,
- NPC_F_GRE_VER1_HAS_SEQ,
- NPC_F_GRE_VER1_HAS_ACK,
- NPC_F_GRE_VER1_HAS_SEQ_ACK,
- NPC_F_GRE_VER1_UNK_PROTO,
- NPC_F_TU_ETHER_UNK,
- NPC_F_TU_ETHER_CTAG,
- NPC_F_TU_ETHER_CTAG_UNK,
- NPC_F_TU_ETHER_STAG_CTAG,
- NPC_F_TU_ETHER_STAG_CTAG_UNK,
- NPC_F_TU_ETHER_STAG,
- NPC_F_TU_ETHER_STAG_UNK,
- NPC_F_TU_ETHER_QINQ_CTAG,
- NPC_F_TU_ETHER_QINQ_CTAG_UNK,
- NPC_F_TU_ETHER_QINQ,
- NPC_F_TU_ETHER_QINQ_UNK,
- NPC_F_LAST /* has to be the last item */
+enum npc_kpu_la_uflag {
+ NPC_F_LA_U_HAS_TAG = 0x10,
+ NPC_F_LA_U_HAS_IH_NIX = 0x20,
+ NPC_F_LA_U_HAS_HIGIG2 = 0x40,
+};
+enum npc_kpu_la_lflag {
+ NPC_F_LA_L_UNK_ETYPE = 1,
+ NPC_F_LA_L_WITH_VLAN,
+ NPC_F_LA_L_WITH_ETAG,
+ NPC_F_LA_L_WITH_ITAG,
+ NPC_F_LA_L_WITH_MPLS,
+ NPC_F_LA_L_WITH_NSH,
+};
+
+enum npc_kpu_lb_uflag {
+ NPC_F_LB_U_UNK_ETYPE = 0x80,
+ NPC_F_LB_U_MORE_TAG = 0x40,
+};
+enum npc_kpu_lb_lflag {
+ NPC_F_LB_L_WITH_CTAG = 1,
+ NPC_F_LB_L_WITH_CTAG_UNK,
+ NPC_F_LB_L_WITH_STAG_CTAG,
+ NPC_F_LB_L_WITH_STAG_STAG,
+ NPC_F_LB_L_WITH_QINQ_CTAG,
+ NPC_F_LB_L_WITH_QINQ_QINQ,
+ NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_L_WITH_ITAG_STAG,
+ NPC_F_LB_L_WITH_ITAG_CTAG,
+ NPC_F_LB_L_WITH_ITAG_UNK,
+ NPC_F_LB_L_WITH_BTAG_ITAG,
+ NPC_F_LB_L_WITH_STAG,
+ NPC_F_LB_L_WITH_QINQ,
+ NPC_F_LB_L_DSA,
+ NPC_F_LB_L_DSA_VLAN,
+ NPC_F_LB_L_EDSA,
+ NPC_F_LB_L_EDSA_VLAN,
+ NPC_F_LB_L_EXDSA,
+ NPC_F_LB_L_EXDSA_VLAN,
+};
+
+enum npc_kpu_lc_uflag {
+ NPC_F_LC_U_UNK_PROTO = 0x10,
+ NPC_F_LC_U_IP_FRAG = 0x20,
+ NPC_F_LC_U_IP6_FRAG = 0x40,
+};
+enum npc_kpu_lc_lflag {
+ NPC_F_LC_L_IP_IN_IP = 1,
+ NPC_F_LC_L_6TO4,
+ NPC_F_LC_L_MPLS_IN_IP,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ NPC_F_LC_L_MPLS_4_LABELS,
+ NPC_F_LC_L_MPLS_3_LABELS,
+ NPC_F_LC_L_MPLS_2_LABELS,
+ NPC_F_LC_L_EXT_HOP,
+ NPC_F_LC_L_EXT_DEST,
+ NPC_F_LC_L_EXT_ROUT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ NPC_F_LC_L_EXT_HOSTID,
+ NPC_F_LC_L_EXT_SHIM6,
+};
+
+enum npc_kpu_ld_lflag {
+ NPC_F_LD_L_TCP_UNK_PORT = 1,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ NPC_F_LD_L_TCP_UNK_PORT_HAS_OPTIONS,
+ NPC_F_LD_L_UDP_UNK_PORT,
+ NPC_F_LD_L_GRE_NVGRE,
+ NPC_F_LD_L_GRE_HAS_SRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_F_LD_L_GRE_HAS_ROUTE,
+ NPC_F_LD_L_GRE_UNK_PROTO,
+ NPC_F_LD_L_GRE_VER1,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ,
+ NPC_F_LD_L_GRE_VER1_HAS_ACK,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ_ACK,
+ NPC_F_LD_L_GRE_VER1_UNK_PROTO,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ NPC_F_LD_L_MPLS_2_LABELS,
+};
+
+enum npc_kpu_le_lflag {
+ NPC_F_LE_L_VXLAN_NOVNI,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ NPC_F_LE_L_VXLANGPE_UNK,
+ NPC_F_LE_L_VXLANGPE_NONP,
+ NPC_F_LE_L_GENEVE_OAM,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ NPC_F_LE_L_GTPU_G_PDU,
+ NPC_F_LE_L_GTPU_UNK,
+};
+
+enum npc_kpu_lf_uflag {
+ NPC_F_LF_U_UNK_ETYPE = 0x10,
+ NPC_F_LF_U_HAS_TAG = 0x20,
+};
+
+enum npc_kpu_lf_lflag {
+ NPC_F_LF_L_WITH_CTAG = 1,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ NPC_F_LF_L_WITH_STAG,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ NPC_F_LF_L_WITH_QINQ,
+};
+
+enum npc_kpu_lg_uflag {
+ NPC_F_LG_U_UNK_IP_PROTO = 0x10,
+ NPC_F_LG_U_IP_HAS_OPTIONS = 0x20,
+ NPC_F_LG_U_IP6_HAS_EXT = 0x40,
+};
+
+enum npc_kpu_lh_uflag {
+ NPC_F_LH_U_TCP_HAS_OPTIONS = 0x80,
+};
+
+enum npc_kpu_lh_lflag {
+ NPC_F_LH_L_TCP_HTTP = 1,
+ NPC_F_LH_L_TCP_HTTPS,
+ NPC_F_LH_L_TCP_PPTP,
+ NPC_F_LH_L_TCP_UNK_PORT,
+ NPC_F_LH_L_UDP_UNK_PORT,
};
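
A pattern runs through all of these new flag enums: each layer's "uflags" are individual high bits (0x10 through 0x80) that can be OR'd together, while its "lflags" are sequential codes confined to the remaining low bits, so one lflag plus any set of uflags fit in a single flags field without colliding. A usage sketch, assuming the two kinds are combined by OR as the chosen values suggest:

	/* tunneled TCP carrying options, seen on an unrecognized port */
	uint8_t lh_flags = NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_UNK_PORT;
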
enum npc_kpu_err_code {
NPC_EC_NOERR = 0, /* has to be zero */
NPC_EC_UNK,
+ NPC_EC_IH_LENGTH,
+ NPC_EC_EDSA_UNK,
NPC_EC_L2_K1,
NPC_EC_L2_K2,
NPC_EC_L2_K3,
NPC_EC_L2_K3_ETYPE_UNK,
- NPC_EC_L2_MPLS_2MANY,
NPC_EC_L2_K4,
+ NPC_EC_MPLS_2MANY,
+ NPC_EC_MPLS_UNK,
+ NPC_EC_NSH_UNK,
+ NPC_EC_IP_TTL_0,
+ NPC_EC_IP_FRAG_OFFSET_1,
NPC_EC_IP_VER,
+ NPC_EC_IP6_HOP_0,
NPC_EC_IP6_VER,
+ NPC_EC_TCP_FLAGS_FIN_ONLY,
+ NPC_EC_TCP_FLAGS_ZERO,
+ NPC_EC_TCP_FLAGS_RST_FIN,
+ NPC_EC_TCP_FLAGS_URG_SYN,
+ NPC_EC_TCP_FLAGS_RST_SYN,
+ NPC_EC_TCP_FLAGS_SYN_FIN,
NPC_EC_VXLAN,
NPC_EC_NVGRE,
NPC_EC_GRE,
NPC_EC_GRE_VER1,
NPC_EC_L4,
+ NPC_EC_OIP4_CSUM,
+ NPC_EC_IIP4_CSUM,
NPC_EC_LAST /* has to be the last item */
};
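
The NPC_EC_TCP_FLAGS_* additions name the classic illegal TCP flag combinations, and they pair with the NPC_TCP_FLAGS_* masks defined earlier in this header. The helper below is hypothetical and only restates what those codes imply; in the driver the checks are expressed as KPU CAM match entries rather than open-coded C:

	/* Hypothetical restatement of the NPC_EC_TCP_FLAGS_* checks. */
	static enum npc_kpu_err_code npc_check_tcp_flags(uint16_t tcp_flags_word)
	{
		uint16_t f = tcp_flags_word & NPC_TCP_FLAGS_MASK;

		if (f == 0)
			return NPC_EC_TCP_FLAGS_ZERO;
		if (f == NPC_TCP_FLAGS_FIN)
			return NPC_EC_TCP_FLAGS_FIN_ONLY;
		if ((f & (NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN)) ==
		    (NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN))
			return NPC_EC_TCP_FLAGS_RST_FIN;
		if ((f & (NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN)) ==
		    (NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN))
			return NPC_EC_TCP_FLAGS_URG_SYN;
		if ((f & (NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN)) ==
		    (NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN))
			return NPC_EC_TCP_FLAGS_RST_SYN;
		if ((f & (NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN)) ==
		    (NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN))
			return NPC_EC_TCP_FLAGS_SYN_FIN;
		return NPC_EC_NOERR;
	}
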
@@ -328,5282 +420,12598 @@ enum NPC_ERRLEV_E {
static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 36, 40, 44, 0, 0,
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 28, 32, 36, 0, 0,
+ NPC_S_KPU1_HIGIG2, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_EXDSA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 1, 0xff,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 20, 24, 28, 0, 0,
+ NPC_S_KPU1_IH_NIX, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
};
static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
{
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ETAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, 0x0000, 0xfc00,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, 0x0400, 0xfe00,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_QINQ, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ETAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0010, 0x0010, 0x0000, 0xffff,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0010, 0x0010, 0x0000, 0xffff,
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_DSA,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0000,
+ 0xfc00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0400,
+ 0xfe00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH, 0xff,
+ NPC_IH_W|NPC_IH_UTAG,
+ NPC_IH_W|NPC_IH_UTAG,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH, 0xff,
+ NPC_IH_W,
+ NPC_IH_W|NPC_IH_UTAG,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH, 0xff,
+ 0x0000,
+ NPC_IH_W|NPC_IH_UTAG,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ NPC_DSA_EXTEND,
+ NPC_DSA_EXTEND,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EXTEND,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+	NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
{
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+	NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
{
- NPC_S_KPU1_PKI, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU4_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_MPLS,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
};
-static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ NPC_IP_TTL_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_ARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_RARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_PTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_FCOE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ NPC_IP6_HOP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_HOP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_DEST << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_MOBILITY << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_HOSTID << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_SHIM6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU6_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU7_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_VXLAN,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_VXLANGPE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GENEVE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GTPC,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GTPU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_PTP_E,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_PTP_G,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_MPLS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_SCTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_IGMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_ESP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_AH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_GRE_F_ROUTE,
+ 0x4fff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x4fff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0003,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_ACK|NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_F_ACK|NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x2001,
+ 0xef7f,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0001,
+ 0x0003,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLAN_I,
+ NPC_VXLAN_I,
+ 0x0000,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P,
+ 0x0000,
+ 0x0000,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ 0x0000,
+ 0x0000,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_SBTAG, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_RARP, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_PTP, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_FCOE, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_S_KPU9_GTPC, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_S_KPU9_GTPU, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
+ NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_GTPU, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_NSH, 0xffff,
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_PPP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER_IN_NSH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff,
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_ARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU13_TU_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU14_TU_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_SCTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_IGMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ESP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_AH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU16_TCP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTPS_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_PPTP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_UDP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_UDP_PTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 18, 22, 26, 0, 0,
+ NPC_S_KPU2_ITAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 1, 0,
+ NPC_S_KPU3_DSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_QINQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 18, 22, 26, 0, 0,
+ NPC_S_KPU2_ITAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 14, 16, 0, 0,
+ NPC_S_KPU2_PREHEADER, 8, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_8_ETHER,
+ 0,
+ 1, 0xff, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 14, 16, 0, 0,
+ NPC_S_KPU2_PREHEADER, 4, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_4_ETHER,
+ 0,
+ 1, 0xff, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 14, 16, 0, 0,
+ NPC_S_KPU2_PREHEADER, 2, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_2_ETHER,
+ 0,
+ 1, 0xff, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LA, NPC_EC_IH_LENGTH,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_ITAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 16, 0, 0,
+ NPC_S_KPU2_EXDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LA, NPC_EC_EDSA_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_QINQ, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 18, 22, 26, 0, 0,
+ NPC_S_KPU2_ITAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 18, 22, 26, 0, 0,
+ NPC_S_KPU2_ITAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LA, NPC_EC_L2_K1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
+};
+
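For readers decoding these tables: the removed "-" rows are positional initializers for the KPU CAM match keys, and the added "+" rows are the per-entry parser actions that take their place. Below is a minimal annotated sketch of the two layouts, assuming the field order of struct npc_kpu_profile_cam and struct npc_kpu_profile_action as defined in the driver's npc.h (which this diff does not show); the field comments are inferred from how the tables use the values, not taken from the source.

/* Sketch only; assumes <linux/types.h> for u8/u16. */
struct npc_kpu_profile_cam {	/* one "-" row: what a KPU matches on */
	u8 state;		/* parser state, e.g. NPC_S_KPU2_SBTAG */
	u8 state_mask;
	u16 dp0, dp0_mask;	/* three 16-bit packet words, read at the   */
	u16 dp1, dp1_mask;	/* data pointers set up by the previous KPU */
	u16 dp2, dp2_mask;
};

struct npc_kpu_profile_action {	/* one "+" row: action on a CAM hit */
	u8 errlev;		/* error level to report, e.g. NPC_ERRLEV_RE */
	u8 errcode;		/* error code, e.g. NPC_EC_NOERR */
	u8 dp0_offset;		/* offsets of the next KPU's match words */
	u8 dp1_offset;
	u8 dp2_offset;
	u8 bypass_count;	/* KPUs to skip before next_state runs */
	u8 parse_done;		/* 1 terminates parsing (the NPC_S_NA rows) */
	u8 next_state;		/* e.g. NPC_S_KPU5_IP */
	u8 ptr_advance;		/* bytes consumed, e.g. 14 for plain Ethernet */
	u8 cap_ena;		/* 1 records this layer's lid/ltype/flags */
	u8 lid;			/* layer id, e.g. NPC_LID_LA */
	u8 ltype;		/* layer type, e.g. NPC_LT_LA_ETHER */
	u8 flags;		/* NPC_F_* layer flags */
	u8 offset;		/* apparently variable-length header math; */
	u8 mask;		/* see the PREHEADER rows' trailing        */
	u8 right;		/* "1, 0xff, 0, 0"                         */
	u8 shift;
};

Read the first kpu1_action_entries row above against this layout: an untagged Ethernet frame carrying IPv4 reports no error, advances 14 bytes past the Ethernet header, skips three KPUs (KPU2 to KPU4) straight to KPU5's NPC_S_KPU5_IP state, and records layer A as NPC_LT_LA_ETHER.
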
+static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_STAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_STAG, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 1,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 2,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 2,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
{
- NPC_S_KPU4_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 16, 20, 24, 0, 0,
+ NPC_S_KPU3_ITAG, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_BTAG_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_STAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_STAG, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 20, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 20, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 20, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 20, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_ARP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_RARP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_PTP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_FCOE, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_CTAG_C, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_GRE << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 20, 0, 0,
+ NPC_S_KPU3_STAG_C, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_IP6 << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ_C, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_MPLS << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_RARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 16, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA_VLAN,
+ NPC_F_LB_L_EDSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA_VLAN,
+ NPC_F_LB_L_EXDSA_VLAN,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
{
- NPC_S_KPU6_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
{
- NPC_S_KPU7_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
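How the paired tables fit together, sketched under the assumption that the profile wrapper keeps the shape it has in the driver's npc.h (the names below come from that header and from elsewhere in this file, not from the hunks shown here): row i of each kpuN_cam_entries[] pairs with row i of the matching kpuN_action_entries[], and the admin function programs one such pair set per KPU.

/* Sketch only; assumes <linux/kernel.h> for ARRAY_SIZE(). */
struct npc_kpu_profile {
	int cam_entries;			/* rows in this KPU's CAM table */
	int action_entries;			/* expected to equal cam_entries */
	struct npc_kpu_profile_cam *cam;	/* match keys */
	struct npc_kpu_profile_action *action;	/* paired actions */
};

/* One wrapper per KPU, along the lines of: */
static struct npc_kpu_profile npc_kpu_profiles[] = {
	{
		ARRAY_SIZE(kpu1_cam_entries),
		ARRAY_SIZE(kpu1_action_entries),
		&kpu1_cam_entries[0],
		&kpu1_action_entries[0],
	},
	/* ... and so on for the remaining KPUs ... */
};
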
-static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
- {
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
- },
+static struct npc_kpu_profile_action kpu3_action_entries[] = {
{
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
- NPC_VXLAN_I, NPC_VXLAN_I, 0x0000, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
- 0x0000, 0xffff, 0x0000, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- 0x0000, NPC_VXLANGPE_P, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_CRI_OPT,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPC, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff,
- NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
- NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
- 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_SCTP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_ICMP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_IGMP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_ICMP6, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_ESP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_AH, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 18, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 26, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 26, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 26, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- NPC_GRE_F_ROUTE, 0x4fff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- 0x0000, 0x4fff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- 0x0000, 0x0003, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_VER_1, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- 0x2001, 0xef7f, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- 0x0001, 0x0003, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
{
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
- NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
{
- NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
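
Each added entry in these rewritten tables is a positional initializer of struct npc_kpu_profile_action. Assuming the field order declared in the driver's npc.h alongside this patch (the names below are taken from that header, not from this hunk), the six initializer lines of every entry map onto the struct like this; the variable-advance columns at the end are sketched after kpu5_action_entries below:

struct npc_kpu_profile_action {
	u8 errlev;		/* error level to report (NPC_ERRLEV_*) */
	u8 errcode;		/* error code, NPC_EC_NOERR for a clean match */
	u8 dp0_offset;		/* offsets of the three data-pointer captures */
	u8 dp1_offset;
	u8 dp2_offset;
	u8 bypass_count;	/* how many subsequent KPUs to skip */
	u8 parse_done;		/* 1: stop parsing after this entry */
	u8 next_state;		/* NPC_S_* state handed to the next KPU */
	u8 ptr_advance;		/* fixed byte advance into the packet */
	u8 cap_ena;		/* 1: record lid/ltype/flags for this layer */
	u8 lid;			/* layer id, NPC_LID_* */
	u8 ltype;		/* layer type, NPC_LT_* */
	u8 flags;		/* NPC_F_* layer flags */
	u8 offset;		/* source byte of the variable advance... */
	u8 mask;		/* ...its mask... */
	u8 right;		/* ...right shift... */
	u8 shift;		/* ...and left shift */
};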
-static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
- },
+static struct npc_kpu_profile_action kpu4_action_entries[] = {
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 4, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 8, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU5_MPLS, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 7, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 7, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_S_KPU11_TU_PPP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 6, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_S_KPU11_TU_MPLS_IN_NSH, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 4, 0,
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_S_KPU11_TU_3RD_NSH, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LC, NPC_EC_NSH_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K4,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
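
Reading one kpu4_action_entries row against that layout, the NSH-to-inner-IPv4 dispatch entry above decodes as below. The row is restated verbatim from the hunk; the per-field comments are this note's interpretation rather than part of the patch:

	{
		NPC_ERRLEV_RE, NPC_EC_NOERR,	/* clean match, no error */
		8, 0, 6, 7, 0,			/* dp offsets 8/0/6; bypass 7 KPUs; keep parsing */
		NPC_S_KPU12_TU_IP, 0, 1,	/* inner IP handled by KPU12; capture layer info */
		NPC_LID_LC, NPC_LT_LC_NSH,	/* record layer C as NSH */
		0,				/* no NPC_F_* flags */
		1, 0x3f, 0, 2,			/* advance by (byte1 & 0x3f) << 2: NSH length in 4-byte words */
	},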
-static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_ARP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
+static struct npc_kpu_profile_action kpu5_action_entries[] = {
{
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
{
- NPC_S_KPU13_TU_IP6_EXT, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
{
- NPC_S_KPU14_TU_IP6_EXT, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_6TO4,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 2, 0,
+ NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_UDP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_SCTP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_ICMP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_IGMP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_ICMP6, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_ESP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_AH, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_6TO4,
+ 0, 0xf, 0, 2,
},
-};
-
-static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
{
- NPC_S_KPU16_TCP_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU16_HTTP_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU16_HTTPS_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU16_PPTP_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LC, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU16_UDP_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_ARP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU5_IP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_RARP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU5_IP6, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_PTP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_ARP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_FCOE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_RARP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_PTP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_FCOE, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU2_CTAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20,
- 0, 0, NPC_S_KPU2_SBTAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU2_QINQ, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24,
- 0, 0, NPC_S_KPU2_ETAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ETAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
- 0, 0, NPC_S_KPU2_ITAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_MPLS, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_MPLS, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU4_NSH, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_NSH, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_DEST,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_ROUT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU6_IP6_FRAG, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_U_IP6_FRAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU5_IP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU5_IP6, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_ARP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOSTID,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_RARP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_SHIM6,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_PTP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_FCOE, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU2_CTAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20,
- 0, 0, NPC_S_KPU2_SBTAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU2_QINQ, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24,
- 0, 0, NPC_S_KPU2_ETAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ETAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
- 0, 0, NPC_S_KPU2_ITAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LB, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_MPLS, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_MPLS, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_NSH, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_NSH, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LA, NPC_EC_L2_K1, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
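
Within kpu5_action_entries just above, variable-length headers are handled through the trailing (offset, mask, right, shift) columns rather than a larger fixed ptr_advance: the IPv4-with-options rows carry 0, 0xf, 0, 2 (the IHL nibble scaled to bytes) and the IPv6 extension rows that follow carry 1, 0xff, 0, 3 (the extension length in 8-octet units). A minimal sketch of the advance rule those columns imply; the helper name is hypothetical and not part of this patch:

/* Hypothetical helper: total parser advance implied by one action entry.
 * With mask == 0 the variable part vanishes, so fixed-length entries are
 * covered by the same expression.
 */
static inline unsigned int npc_entry_advance(const struct npc_kpu_profile_action *a,
					     const u8 *hdr)
{
	/* IPv4 options: 0 + ((hdr[0] & 0xf) << 2) == IHL * 4 bytes;
	 * IPv6 ext hdr: 8 + ((hdr[1] & 0xff) << 3) bytes.
	 */
	return a->ptr_advance + (((hdr[a->offset] & a->mask) >> a->right) << a->shift);
}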
-static struct npc_kpu_profile_action kpu2_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_CTAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_STAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_STAG, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_CTAG, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu6_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_CTAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_QINQ, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_QINQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU7_IP6_ROUT, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 1, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_NSH, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_CTAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
- 0, 0, NPC_S_KPU3_ITAG, 12, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_BTAG_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_STAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_QINQ, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_QINQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
+};
+
+static struct npc_kpu_profile_action kpu7_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_STAG, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_CTAG, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 18, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 18, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 18, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 18, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu3_action_entries[] = {
+static struct npc_kpu_profile_action kpu8_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_ZERO,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_RST_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_URG_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_PTP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_RST_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_FCOE, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_SYN_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_UNK_PORT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_UNK_PORT_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_PTP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_FCOE, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_VXLANGPE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GENEVE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GTPC, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GTPU, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_KPU16_UDP_PTP, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_KPU16_UDP_PTP, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_SCTP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_IGMP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ESP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_AH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_NVGRE,
+ NPC_F_LD_L_GRE_NVGRE,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_NVGRE,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_PTP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_FCOE, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_PTP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_FCOE, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 18, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 18, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 26, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 26, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 26, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu4_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU5_MPLS_PL, 4, 1,
- NPC_LID_LC, NPC_LT_LC_MPLS, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU5_MPLS_PL, 8, 1,
- NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_2_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU5_MPLS_PL, 12, 1,
- NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_3_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
- 0, 0, NPC_S_KPU5_MPLS, 12, 1,
- NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_4_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 7, 0, NPC_S_KPU12_TU_IP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 7, 0, NPC_S_KPU12_TU_IP6, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 6, 0, NPC_S_KPU11_TU_ETHER, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU5_NSH, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_ROUTE,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 4, 0, NPC_S_KPU9_TU_MPLS, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K4, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_GRE,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu5_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU8_TCP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
- 2, 0, NPC_S_KPU8_UDP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_SCTP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_ACK,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ICMP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ_ACK,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_IGMP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU8_ESP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_GRE_VER1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU8_AH, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
+};
+
+static struct npc_kpu_profile_action kpu9_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU8_GRE, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU8_TCP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
- 2, 0, NPC_S_KPU8_UDP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_SCTP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ICMP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_IGMP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU8_ESP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU8_AH, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU8_GRE, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
- 0, 0xf, 0, 2,
+ NPC_ERRLEV_LE, NPC_EC_NSH_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLAN,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LC, NPC_EC_IP_VER, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLAN,
+ NPC_F_LE_L_VXLAN_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_ARP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LE, NPC_EC_VXLAN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_RARP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_PTP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_FCOE, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU8_TCP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
- 2, 0, NPC_S_KPU8_UDP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_SCTP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ICMP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ICMP6, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ESP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_AH, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_GRE, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_TUN_IP6, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NONP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 3, 0, NPC_S_KPU9_TU_MPLS, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_MPLS_IN_IP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU6_IP6_EXT, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_HAS_EXT, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_LC, NPC_EC_IP6_VER, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_GTPC,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GTPU,
+ NPC_F_LE_L_GTPU_G_PDU,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_GTPU,
+ NPC_F_LE_L_GTPU_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 5, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU9_TU_MPLS, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LC, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu6_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu7_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LE, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu8_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_HTTP_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_PPTP_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_TCP_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_HTTP_DATA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS,
- 12, 0xf0, 1, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS,
- 12, 0xf0, 1, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_PPTP_DATA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS,
- 12, 0xf0, 1, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_TCP_DATA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
- 12, 0xf0, 1, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN_NOVNI, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_LD, NPC_EC_VXLAN, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NSH, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_MPLS, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_NSH, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_MPLS, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NONP, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPC, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_G_PDU, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_UDP_DATA, 8, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_UNK_PORT, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_SCTP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_ICMP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_IGMP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_ICMP6, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_ESP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_AH, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu10_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_NVGRE, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LD, NPC_EC_NVGRE, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LE, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 4, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 8, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 12, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS, 12, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_ETHER_IN_NSH, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_NSH_UNK,
+ 6, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_ERRLEV_LE, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
+};
+
+static struct npc_kpu_profile_action kpu11_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_ROUTE, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_PPP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_UNK_PROTO, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LD, NPC_EC_GRE, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU11_TU_PPP, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_MPLS_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU11_TU_PPP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU11_TU_PPP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_ACK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU11_TU_PPP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ_ACK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_UNK_PROTO, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_MPLS_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LD, NPC_EC_GRE_VER1, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ETHER_IN_NSH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LD, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu9_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu12_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 0,
- NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_2_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 0,
- NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_3_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS, 12, 0,
- NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_4_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 1,
- NPC_LID_LD, NPC_LT_LD_TU_MPLS, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 1,
- NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_2_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_IGMP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 1,
- NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_3_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS, 12, 1,
- NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_4_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_UNK_IP_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 1, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU10_TU_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_IGMP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
-};
-
-static struct npc_kpu_profile_action kpu10_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP6, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 8, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS | NPC_F_LG_U_UNK_IP_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ARP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP6, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU13_TU_IP6_EXT, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ NPC_F_LG_U_IP6_HAS_EXT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_LF, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LG, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu11_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 14, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 14, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 14, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER,
- NPC_F_TU_ETHER_STAG_CTAG_UNK, 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER,
- NPC_F_TU_ETHER_QINQ_CTAG_UNK, 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_PPP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_NSH, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_3RD_NSH, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu13_action_entries[] = {
{
- NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LE, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu12_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU15_TU_TCP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU15_TU_UDP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_SCTP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ICMP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_IGMP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ESP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_AH, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_UNK_PROTO, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU15_TU_TCP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU15_TU_UDP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_SCTP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ICMP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_IGMP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ESP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_AH, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP,
- NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LF, NPC_EC_IP_VER, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_ARP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU15_TU_TCP, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU15_TU_UDP, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_SCTP, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ICMP, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu14_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ICMP6, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
+};
+
+static struct npc_kpu_profile_action kpu15_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ESP, 40, 1,
- NPC_LID_LC, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_AH, 40, 1,
- NPC_LID_LC, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_ZERO,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU13_TU_IP6_EXT, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, NPC_F_IP6_HAS_EXT, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_RST_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LF, NPC_EC_IP6_VER, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_URG_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LF, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LF, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_RST_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu13_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_SYN_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu14_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
- },
-};
-
-static struct npc_kpu_profile_action kpu15_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_HTTP_DATA, 20, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_HTTP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_HTTP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_PPTP_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_PPTP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_TCP_DATA, 20, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_UNK_PORT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_HTTP_DATA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_HTTP,
12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_HTTPS,
12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_PPTP_DATA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_PPTP,
12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_TCP_DATA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_UNK_PORT,
12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_UDP_DATA, 8, 1,
- NPC_LID_LG, NPC_LT_LG_TU_UDP, NPC_F_UDP_UNK_PORT, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_UDP,
+ NPC_F_LH_L_UDP_UNK_PORT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_SCTP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_SCTP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_ICMP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ICMP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_IGMP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_IGMP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_ICMP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ICMP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_ESP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ESP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_AH, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_AH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LG, NPC_EC_L4, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LG, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_L4,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
static struct npc_kpu_profile_action kpu16_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_TCP_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_HTTP_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_HTTPS_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_PPTP_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_UDP_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
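
Each initializer in the KPU tables above populates one struct npc_kpu_profile_action; the patch rewraps every 17-field entry into one logical group per line while retargeting states, layer ids and flags. As a reading aid, the annotated layout below maps those groups to field names as declared in the driver's npc.h (reproduced from memory of the driver headers; the comments are illustrative and not part of the table source):

    struct npc_kpu_profile_action {
        u8 errlev;       /* e.g. NPC_ERRLEV_RE */
        u8 errcode;      /* e.g. NPC_EC_NOERR */
        u8 dp0_offset;   /* offsets for up to three parse pointers */
        u8 dp1_offset;
        u8 dp2_offset;
        u8 bypass_count;
        u8 parse_done;   /* 1 terminates parsing (paired with NPC_S_NA) */
        u8 next_state;   /* e.g. NPC_S_KPU12_TU_IP */
        u8 ptr_advance;  /* bytes to advance past the parsed header */
        u8 cap_ena;      /* capture layer info into the parse result */
        u8 lid;          /* layer id, e.g. NPC_LID_LF */
        u8 ltype;        /* layer type, e.g. NPC_LT_LF_TU_ETHER */
        u8 flags;        /* e.g. NPC_F_LF_L_WITH_CTAG */
        u8 offset;       /* variable-length headers: field offset, */
        u8 mask;         /* mask, right-shift flag and shift amount */
        u8 right;        /* (e.g. 0, 0xf, 0, 2 extracts the IPv4 IHL) */
        u8 shift;
    };
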
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index e581091c09c4..5c190c3ce898 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -56,12 +56,34 @@ static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+static void rvu_setup_hw_capabilities(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
+ hw->cap.nix_fixed_txschq_mapping = false;
+ hw->cap.nix_shaping = true;
+ hw->cap.nix_tx_link_bp = true;
+ hw->cap.nix_rx_multicast = true;
+
+ if (is_rvu_96xx_B0(rvu)) {
+ hw->cap.nix_fixed_txschq_mapping = true;
+ hw->cap.nix_txsch_per_cgx_lmac = 4;
+ hw->cap.nix_txsch_per_lbk_lmac = 132;
+ hw->cap.nix_txsch_per_sdp_lmac = 76;
+ hw->cap.nix_shaping = false;
+ hw->cap.nix_tx_link_bp = false;
+ if (is_rvu_96xx_A0(rvu))
+ hw->cap.nix_rx_multicast = false;
+ }
+}
+
/* Poll a RVU block's register 'offset', for a 'zero'
* or 'nonzero' at bits specified by 'mask'
*/
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
- unsigned long timeout = jiffies + usecs_to_jiffies(100);
+ unsigned long timeout = jiffies + usecs_to_jiffies(10000);
void __iomem *reg;
u64 reg_val;
@@ -73,7 +95,6 @@ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
if (!zero && (reg_val & mask))
return 0;
usleep_range(1, 5);
- timeout--;
}
return -EBUSY;
}
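
The two hunks above are independent fixes: rvu_setup_hw_capabilities() caches per-silicon feature flags at probe time so that callers test hw->cap instead of re-probing silicon revisions, and rvu_poll_reg() now bounds its wait purely by jiffies, raising the budget from 100us to 10ms and dropping the stray timeout-- that also decremented the jiffies-based deadline. A minimal sketch of how the reworked poll is typically consumed, modeled on the driver's block-reset path (the helper name here is hypothetical):

    /* Hypothetical helper sketching the usual rvu_poll_reg() call pattern:
     * trigger a block reset, then wait (bounded at ~10ms) for hardware to
     * clear the BUSY bit (bit 63) in the reset register.
     */
    static int rvu_block_reset_sync(struct rvu *rvu, int blkaddr, u64 rst_reg)
    {
        rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));    /* start reset */
        return rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
    }
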
@@ -433,9 +454,9 @@ static void rvu_reset_all_blocks(struct rvu *rvu)
rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
- rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST);
- rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST);
- rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
@@ -877,8 +898,8 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
return 0;
}
-static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
- struct ready_msg_rsp *rsp)
+int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
+ struct ready_msg_rsp *rsp)
{
return 0;
}
@@ -1023,9 +1044,9 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
return 0;
}
-static int rvu_mbox_handler_detach_resources(struct rvu *rvu,
- struct rsrc_detach *detach,
- struct msg_rsp *rsp)
+int rvu_mbox_handler_detach_resources(struct rvu *rvu,
+ struct rsrc_detach *detach,
+ struct msg_rsp *rsp)
{
return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
@@ -1171,9 +1192,9 @@ fail:
return -ENOSPC;
}
-static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
- struct rsrc_attach *attach,
- struct msg_rsp *rsp)
+int rvu_mbox_handler_attach_resources(struct rvu *rvu,
+ struct rsrc_attach *attach,
+ struct msg_rsp *rsp)
{
u16 pcifunc = attach->hdr.pcifunc;
int err;
@@ -1294,8 +1315,8 @@ static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}
-static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
- struct msix_offset_rsp *rsp)
+int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
+ struct msix_offset_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
@@ -1343,8 +1364,8 @@ static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
return 0;
}
-static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp)
+int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
u16 vf, numvfs;
@@ -1363,6 +1384,17 @@ static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
+ struct get_hw_cap_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
+ rsp->nix_shaping = hw->cap.nix_shaping;
+
+ return 0;
+}
+
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
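
rvu_mbox_handler_get_hw_cap() above exposes two of the cached capabilities to PF/VF drivers over the mailbox. A PF-side consumer could look roughly like the sketch below; struct otx2_nic, the otx2_* mailbox helpers and the cached field name are assumptions modeled on the OcteonTx2 PF driver and are not part of this patch:

    /* Hypothetical PF-side query of AF hardware capabilities. */
    static int otx2_query_hw_cap(struct otx2_nic *pf)
    {
        struct get_hw_cap_rsp *rsp;
        struct msg_req *req;

        req = otx2_mbox_alloc_msg_get_hw_cap(&pf->mbox);   /* per M() naming */
        if (!req)
            return -ENOMEM;

        if (otx2_sync_mbox_msg(&pf->mbox))
            return -EIO;

        rsp = (struct get_hw_cap_rsp *)otx2_mbox_get_rsp(&pf->mbox.mbox, 0,
                                                         &req->hdr);
        if (IS_ERR(rsp))
            return PTR_ERR(rsp);

        /* Cache what matters to the PF, e.g. fixed vs. flexible TXSCHQs */
        pf->hw_cap_fixed_txschq = rsp->nix_fixed_txschq_mapping;
        return 0;
    }
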
@@ -1440,12 +1472,12 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
/* Process received mbox messages */
req_hdr = mdev->mbase + mbox->rx_start;
- if (req_hdr->num_msgs == 0)
+ if (mw->mbox_wrk[devid].num_msgs == 0)
return;
offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < req_hdr->num_msgs; id++) {
+ for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
msg = mdev->mbase + offset;
/* Set which PF/VF sent this message based on mbox IRQ */
@@ -1471,13 +1503,14 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
err, otx2_mbox_id2name(msg->id),
- msg->id, devid,
+ msg->id, rvu_get_pf(msg->pcifunc),
(msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
else
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
err, otx2_mbox_id2name(msg->id),
msg->id, devid);
}
+ mw->mbox_wrk[devid].num_msgs = 0;
/* Send mbox responses to VF/PF */
otx2_mbox_msg_send(mbox, devid);
@@ -1523,14 +1556,14 @@ static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
mdev = &mbox->dev[devid];
rsp_hdr = mdev->mbase + mbox->rx_start;
- if (rsp_hdr->num_msgs == 0) {
+ if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
return;
}
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < rsp_hdr->num_msgs; id++) {
+ for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
msg = mdev->mbase + offset;
if (msg->id >= MBOX_MSG_MAX) {
@@ -1560,6 +1593,7 @@ end:
offset = mbox->rx_start + msg->next_msgoff;
mdev->msgs_acked++;
}
+ mw->mbox_wrk_up[devid].up_num_msgs = 0;
otx2_mbox_reset(mbox, devid);
}
@@ -1697,14 +1731,28 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first,
mbox = &mw->mbox;
mdev = &mbox->dev[i];
hdr = mdev->mbase + mbox->rx_start;
- if (hdr->num_msgs)
- queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
+ /* The hdr->num_msgs is cleared immediately in the interrupt
+ * handler so that it holds a correct value the next time the
+ * interrupt handler is called.
+ * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
+ * and pf->mbox.up_num_msgs holds the data for use in
+ * pfaf_mbox_up_handler.
+ */
+
+ if (hdr->num_msgs) {
+ mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
+ }
mbox = &mw->mbox_up;
mdev = &mbox->dev[i];
hdr = mdev->mbase + mbox->rx_start;
- if (hdr->num_msgs)
+ if (hdr->num_msgs) {
+ mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
+ }
}
}
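
The rvu_queue_work() change above, together with the handler changes earlier in this file, replaces direct reads of the shared mailbox header with a per-work snapshot: the count is copied into mbox_wrk[].num_msgs (or up_num_msgs) and the header is zeroed before the work is queued, then the handler clears its snapshot once all messages are drained, so a new interrupt cannot observe a stale count. Stripped of driver detail, the handoff pattern is:

    /* Producer (IRQ path): snapshot the shared count, clear it, queue work. */
    if (hdr->num_msgs) {
        mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
        hdr->num_msgs = 0;
        queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
    }

    /* Consumer (work handler): walk the snapshot, then mark it drained. */
    for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++)
        handle_one_msg(mdev->mbase + offset);  /* handle_one_msg() is illustrative */
    mw->mbox_wrk[devid].num_msgs = 0;
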
@@ -2316,18 +2364,6 @@ static int rvu_enable_sriov(struct rvu *rvu)
if (vfs > chans)
vfs = chans;
- /* AF's VFs work in pairs and talk over consecutive loopback channels.
- * Thus we want to enable maximum even number of VFs. In case
- * odd number of VFs are available then the last VF on the list
- * remains disabled.
- */
- if (vfs & 0x1) {
- dev_warn(&pdev->dev,
- "Number of VFs should be even. Enabling %d out of %d.\n",
- vfs - 1, vfs);
- vfs--;
- }
-
if (!vfs)
return 0;
@@ -2432,6 +2468,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rvu_reset_all_blocks(rvu);
+ rvu_setup_hw_capabilities(rvu);
+
err = rvu_setup_hw_resources(rvu);
if (err)
goto err_release_regions;
@@ -2456,6 +2494,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_irq;
+ /* Initialize debugfs */
+ rvu_dbg_init(rvu);
+
return 0;
err_irq:
rvu_unregister_interrupts(rvu);
@@ -2482,6 +2523,7 @@ static void rvu_remove(struct pci_dev *pdev)
{
struct rvu *rvu = pci_get_drvdata(pdev);
+ rvu_dbg_exit(rvu);
rvu_unregister_interrupts(rvu);
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index c9d60b0554c0..51c206f4fe6f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -35,9 +35,36 @@
#define RVU_PFVF_FUNC_SHIFT 0
#define RVU_PFVF_FUNC_MASK 0x3FF
+#ifdef CONFIG_DEBUG_FS
+struct dump_ctx {
+ int lf;
+ int id;
+ bool all;
+};
+
+struct rvu_debugfs {
+ struct dentry *root;
+ struct dentry *cgx_root;
+ struct dentry *cgx;
+ struct dentry *lmac;
+ struct dentry *npa;
+ struct dentry *nix;
+ struct dentry *npc;
+ struct dump_ctx npa_aura_ctx;
+ struct dump_ctx npa_pool_ctx;
+ struct dump_ctx nix_cq_ctx;
+ struct dump_ctx nix_rq_ctx;
+ struct dump_ctx nix_sq_ctx;
+ int npa_qsize_id;
+ int nix_qsize_id;
+};
+#endif
+
struct rvu_work {
struct work_struct work;
struct rvu *rvu;
+ int num_msgs;
+ int up_num_msgs;
};
struct rsrc_bmap {
@@ -99,6 +126,7 @@ struct npc_mcam {
u16 lprio_start;
u16 hprio_count;
u16 hprio_end;
+ u16 rx_miss_act_cntr; /* Counter for RX MISS action */
};
/* Structure for per RVU func info ie PF/VF */
@@ -151,15 +179,20 @@ struct rvu_pfvf {
struct mcam_entry entry;
int rxvlan_index;
bool rxvlan;
+
+ bool cgx_in_use; /* this PF/VF using CGX? */
+ int cgx_users; /* number of cgx users - used only by PFs */
};
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
-#define NIX_TXSCHQ_TL1_CFG_DONE BIT_ULL(0)
+#define NIX_TXSCHQ_FREE BIT_ULL(1)
+#define NIX_TXSCHQ_CFG_DONE BIT_ULL(0)
#define TXSCH_MAP_FUNC(__pfvf_map) ((__pfvf_map) & 0xFFFF)
#define TXSCH_MAP_FLAGS(__pfvf_map) ((__pfvf_map) >> 16)
#define TXSCH_MAP(__func, __flags) (((__func) & 0xFFFF) | ((__flags) << 16))
+#define TXSCH_SET_FLAG(__pfvf_map, flag) ((__pfvf_map) | ((flag) << 16))
u32 *pfvf_map;
};
@@ -193,6 +226,21 @@ struct nix_hw {
struct nix_lso lso;
};
+/* RVU block's capabilities or functionality,
+ * which vary by silicon version/SKU.
+ */
+struct hw_cap {
+ /* Transmit side supported functionality */
+ u8 nix_tx_aggr_lvl; /* Tx link's traffic aggregation level */
+ u16 nix_txsch_per_cgx_lmac; /* Max Q's transmitting to CGX LMAC */
+ u16 nix_txsch_per_lbk_lmac; /* Max Q's transmitting to LBK LMAC */
+ u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */
+ bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
+ bool nix_shaping; /* Is shaping and coloring supported */
+ bool nix_tx_link_bp; /* Can link backpressure TL queues? */
+ bool nix_rx_multicast; /* Rx packet replication support */
+};
+
struct rvu_hwinfo {
u8 total_pfs; /* MAX RVU PFs HW supports */
u16 total_vfs; /* Max RVU VFs HW supports */
@@ -204,7 +252,7 @@ struct rvu_hwinfo {
u8 sdp_links;
u8 npc_kpus; /* No of parser units */
-
+ struct hw_cap cap;
struct rvu_block block[BLK_COUNT]; /* Block info */
struct nix_hw *nix0;
struct npc_pkind pkind;
@@ -261,8 +309,13 @@ struct rvu {
struct workqueue_struct *cgx_evh_wq;
spinlock_t cgx_evq_lock; /* cgx event queue lock */
struct list_head cgx_evq_head; /* cgx event queue head */
+ struct mutex cgx_cfg_lock; /* serialize cgx configuration */
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+
+#ifdef CONFIG_DEBUG_FS
+ struct rvu_debugfs rvu_dbg;
+#endif
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -285,7 +338,8 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
return readq(rvu->pfreg_base + offset);
}
-static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
+/* Silicon revisions */
+static inline bool is_rvu_96xx_A0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
@@ -293,6 +347,14 @@ static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
}
+static inline bool is_rvu_96xx_B0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+}
+
/* Function Prototypes
* RVU
*/
@@ -342,52 +404,25 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
*lmac_id = (map & 0xF);
}
+#define M(_name, _id, fn_name, req, rsp) \
+int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
+MBOX_MESSAGES
+#undef M
+
int rvu_cgx_init(struct rvu *rvu);
int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
-int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
- struct cgx_stats_rsp *rsp);
-int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
- struct cgx_mac_addr_set_or_get *req,
- struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
- struct cgx_mac_addr_set_or_get *req,
- struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
- struct cgx_link_info_msg *rsp);
-int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-
+void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
+int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
+int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
+ int rxtxflag, u64 *stat);
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
-int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
- struct npa_aq_enq_req *req,
- struct npa_aq_enq_rsp *rsp);
-int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
- struct hwctx_disable_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
- struct npa_lf_alloc_req *req,
- struct npa_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
+int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp);
/* NIX APIs */
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
@@ -397,55 +432,7 @@ int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
-int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
- struct nix_lf_alloc_req *req,
- struct nix_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
- struct nix_aq_enq_req *req,
- struct nix_aq_enq_rsp *rsp);
-int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
- struct hwctx_disable_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
- struct nix_txsch_alloc_req *req,
- struct nix_txsch_alloc_rsp *rsp);
-int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
- struct nix_txsch_free_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
- struct nix_txschq_config *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
- struct nix_vtag_config *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
- struct nix_rss_flowkey_cfg *req,
- struct nix_rss_flowkey_cfg_rsp *rsp);
-int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
- struct nix_set_mac_addr *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
- struct nix_mark_format_cfg *req,
- struct nix_mark_format_cfg_rsp *rsp);
-int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
- struct nix_lso_format_cfg *req,
- struct nix_lso_format_cfg_rsp *rsp);
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -460,45 +447,25 @@ void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
+void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc);
int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
-int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
- struct npc_mcam_alloc_entry_req *req,
- struct npc_mcam_alloc_entry_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
- struct npc_mcam_free_entry_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
- struct npc_mcam_write_entry_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
- struct npc_mcam_ena_dis_entry_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
- struct npc_mcam_ena_dis_entry_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
- struct npc_mcam_shift_entry_req *req,
- struct npc_mcam_shift_entry_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
- struct npc_mcam_alloc_counter_req *req,
- struct npc_mcam_alloc_counter_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
- struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
- struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
- struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
- struct npc_mcam_oper_counter_req *req,
- struct npc_mcam_oper_counter_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
- struct npc_mcam_alloc_and_write_entry_req *req,
- struct npc_mcam_alloc_and_write_entry_rsp *rsp);
-int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
- struct npc_get_kex_cfg_rsp *rsp);
+void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt);
+void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt);
+
+#ifdef CONFIG_DEBUG_FS
+void rvu_dbg_init(struct rvu *rvu);
+void rvu_dbg_exit(struct rvu *rvu);
+#else
+static inline void rvu_dbg_init(struct rvu *rvu) {}
+static inline void rvu_dbg_exit(struct rvu *rvu) {}
+#endif
#endif /* RVU_H */
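
The large prototype deletions above are possible because the M() macro near the top of this header's hunk generates one handler prototype per MBOX_MESSAGES entry, replacing the hand-maintained list. For example, given an entry of the shape used in the driver's mbox.h (the READY entry is shown from memory and may differ in detail):

    /* An MBOX_MESSAGES entry such as
     *    M(READY, 0x001, ready, msg_req, ready_msg_rsp)
     * expands through the generator into the prototype
     */
    int rvu_mbox_handler_ready(struct rvu *, struct msg_req *,
                               struct ready_msg_rsp *);
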
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 7d7133c5f799..0bbb2eb1446e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -14,6 +14,7 @@
#include "rvu.h"
#include "cgx.h"
+#include "rvu_reg.h"
struct cgx_evq_entry {
struct list_head evq_node;
@@ -40,12 +41,25 @@ MBOX_UP_CGX_MESSAGES
#undef M
/* Returns bitmap of mapped PFs */
-static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
-static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
+static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
+{
+ unsigned long pfmap;
+
+ pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);
+
+ /* Assumes only one pf mapped to a cgx lmac port */
+ if (!pfmap)
+ return -ENODEV;
+ else
+ return find_first_bit(&pfmap, 16);
+}
+
+static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}
@@ -294,6 +308,8 @@ int rvu_cgx_init(struct rvu *rvu)
if (err)
return err;
+ mutex_init(&rvu->cgx_cfg_lock);
+
/* Ensure event handler registration is completed, before
* we turn on the links
*/
@@ -334,6 +350,24 @@ int rvu_cgx_exit(struct rvu *rvu)
return 0;
}
+void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ /* Set / clear CTL_BCK to control pause frame forwarding to NIX */
+ if (enable)
+ cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
+ else
+ cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
+}
+
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
@@ -562,3 +596,94 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
return 0;
}
+
+/* Finds the cumulative status of NIX rx/tx counters from the LF of a PF and
+ * those from its VFs as well, i.e. the NIX rx/tx counters at the CGX port
+ * level.
+ */
+int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
+ int index, int rxtxflag, u64 *stat)
+{
+ struct rvu_block *block;
+ int blkaddr;
+ u16 pcifunc;
+ int pf, lf;
+
+ if (!cgxd || !rvu)
+ return -EINVAL;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ if (pf < 0)
+ return pf;
+
+ /* Assumes LF of a PF and all of its VF belongs to the same
+ * NIX block
+ */
+ pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return 0;
+ block = &rvu->hw->block[blkaddr];
+
+ *stat = 0;
+ for (lf = 0; lf < block->lf.max; lf++) {
+ /* Check if a lf is attached to this PF or one of its VFs */
+ if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
+ ~RVU_PFVF_FUNC_MASK)))
+ continue;
+ if (rxtxflag == NIX_STATS_RX)
+ *stat += rvu_read64(rvu, blkaddr,
+ NIX_AF_LFX_RX_STATX(lf, index));
+ else
+ *stat += rvu_read64(rvu, blkaddr,
+ NIX_AF_LFX_TX_STATX(lf, index));
+ }
+
+ return 0;
+}
+
+int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
+{
+ struct rvu_pfvf *parent_pf, *pfvf;
+ int cgx_users, err = 0;
+
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ return 0;
+
+ parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ mutex_lock(&rvu->cgx_cfg_lock);
+
+ if (start && pfvf->cgx_in_use)
+ goto exit; /* CGX is already started hence nothing to do */
+ if (!start && !pfvf->cgx_in_use)
+ goto exit; /* CGX is already stopped hence nothing to do */
+
+ if (start) {
+ cgx_users = parent_pf->cgx_users;
+ parent_pf->cgx_users++;
+ } else {
+ parent_pf->cgx_users--;
+ cgx_users = parent_pf->cgx_users;
+ }
+
+ /* Start CGX when the first of the NIXLFs is started.
+ * Stop CGX when the last of the NIXLFs is stopped.
+ */
+ if (!cgx_users) {
+ err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
+ start);
+ if (err) {
+ dev_err(rvu->dev, "Unable to %s CGX\n",
+ start ? "start" : "stop");
+ /* Revert the usage count in case of error */
+ parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
+ : parent_pf->cgx_users + 1;
+ goto exit;
+ }
+ }
+ pfvf->cgx_in_use = start;
+exit:
+ mutex_unlock(&rvu->cgx_cfg_lock);
+ return err;
+}
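
rvu_cgx_start_stop_io() reference-counts NIXLF users of a CGX port under the new cgx_cfg_lock, so the CGX link is configured only when the first user starts and torn down when the last one stops, with the count reverted on failure. Its natural call sites are the NIX LF start/stop RX mailbox handlers in rvu_nix.c, which are outside this excerpt; a call would look roughly like the following (the handler body shown is an assumed shape, not this patch's code):

    int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
                                         struct msg_rsp *rsp)
    {
        u16 pcifunc = req->hdr.pcifunc;
        int err;

        err = rvu_cgx_start_stop_io(rvu, pcifunc, true); /* first user starts CGX */
        if (err)
            return err;

        /* ... enable RX for this NIXLF ... */
        return 0;
    }
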
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
new file mode 100644
index 000000000000..77adad4adb1b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -0,0 +1,1711 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "cgx.h"
+#include "npc.h"
+
+#define DEBUGFS_DIR_NAME "octeontx2"
+
+enum {
+ CGX_STAT0,
+ CGX_STAT1,
+ CGX_STAT2,
+ CGX_STAT3,
+ CGX_STAT4,
+ CGX_STAT5,
+ CGX_STAT6,
+ CGX_STAT7,
+ CGX_STAT8,
+ CGX_STAT9,
+ CGX_STAT10,
+ CGX_STAT11,
+ CGX_STAT12,
+ CGX_STAT13,
+ CGX_STAT14,
+ CGX_STAT15,
+ CGX_STAT16,
+ CGX_STAT17,
+ CGX_STAT18,
+};
+
+/* NIX TX stats */
+enum nix_stat_lf_tx {
+ TX_UCAST = 0x0,
+ TX_BCAST = 0x1,
+ TX_MCAST = 0x2,
+ TX_DROP = 0x3,
+ TX_OCTS = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+/* NIX RX stats */
+enum nix_stat_lf_rx {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_DROP = 0x4,
+ RX_DROP_OCTS = 0x5,
+ RX_FCS = 0x6,
+ RX_ERR = 0x7,
+ RX_DRP_BCAST = 0x8,
+ RX_DRP_MCAST = 0x9,
+ RX_DRP_L3BCAST = 0xa,
+ RX_DRP_L3MCAST = 0xb,
+ RX_STATS_ENUM_LAST,
+};
+
+static char *cgx_rx_stats_fields[] = {
+ [CGX_STAT0] = "Received packets",
+ [CGX_STAT1] = "Octets of received packets",
+ [CGX_STAT2] = "Received PAUSE packets",
+ [CGX_STAT3] = "Received PAUSE and control packets",
+ [CGX_STAT4] = "Filtered DMAC0 (NIX-bound) packets",
+ [CGX_STAT5] = "Filtered DMAC0 (NIX-bound) octets",
+ [CGX_STAT6] = "Packets dropped due to RX FIFO full",
+ [CGX_STAT7] = "Octets dropped due to RX FIFO full",
+ [CGX_STAT8] = "Error packets",
+ [CGX_STAT9] = "Filtered DMAC1 (NCSI-bound) packets",
+ [CGX_STAT10] = "Filtered DMAC1 (NCSI-bound) octets",
+ [CGX_STAT11] = "NCSI-bound packets dropped",
+ [CGX_STAT12] = "NCSI-bound octets dropped",
+};
+
+static char *cgx_tx_stats_fields[] = {
+ [CGX_STAT0] = "Packets dropped due to excessive collisions",
+ [CGX_STAT1] = "Packets dropped due to excessive deferral",
+ [CGX_STAT2] = "Multiple collisions before successful transmission",
+ [CGX_STAT3] = "Single collisions before successful transmission",
+ [CGX_STAT4] = "Total octets sent on the interface",
+ [CGX_STAT5] = "Total frames sent on the interface",
+ [CGX_STAT6] = "Packets sent with an octet count < 64",
+ [CGX_STAT7] = "Packets sent with an octet count == 64",
+ [CGX_STAT8] = "Packets sent with an octet count of 65-127",
+ [CGX_STAT9] = "Packets sent with an octet count of 128-255",
+ [CGX_STAT10] = "Packets sent with an octet count of 256-511",
+ [CGX_STAT11] = "Packets sent with an octet count of 512-1023",
+ [CGX_STAT12] = "Packets sent with an octet count of 1024-1518",
+ [CGX_STAT13] = "Packets sent with an octet count of > 1518",
+ [CGX_STAT14] = "Packets sent to a broadcast DMAC",
+ [CGX_STAT15] = "Packets sent to the multicast DMAC",
+	[CGX_STAT16] = "Packets truncated due to transmit underflow",
+ [CGX_STAT17] = "Control/PAUSE packets sent",
+};
+
+#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
+ blk_addr, NDC_AF_CONST) & 0xFF)
+
+#define rvu_dbg_NULL NULL
+#define rvu_dbg_open_NULL NULL
+
+#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \
+static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, rvu_dbg_##read_op, inode->i_private); \
+} \
+static const struct file_operations rvu_dbg_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = rvu_dbg_open_##name, \
+ .read = seq_read, \
+ .write = rvu_dbg_##write_op, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+#define RVU_DEBUG_FOPS(name, read_op, write_op) \
+static const struct file_operations rvu_dbg_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = simple_open, \
+ .read = rvu_dbg_##read_op, \
+ .write = rvu_dbg_##write_op \
+}
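+
+/* Two file_operations flavors are generated here: RVU_DEBUG_SEQ_FOPS
+ * wires a seq_file based reader (via single_open()) with an optional raw
+ * write handler, while RVU_DEBUG_FOPS implements read and write directly
+ * on the file. The rvu_dbg_NULL/rvu_dbg_open_NULL aliases let a hook be
+ * left unset.
+ */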
+
+static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+
+/* Dumps current provisioning status of all RVU block LFs */
+static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int index, off = 0, flag = 0, go_back = 0, off_prev;
+ struct rvu *rvu = filp->private_data;
+ int lf, pf, vf, pcifunc;
+ struct rvu_block block;
+ int bytes_not_copied;
+ int buf_size = 2048;
+ char *buf;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+		return -ENOMEM;
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
+ for (index = 0; index < BLK_COUNT; index++)
+ if (strlen(rvu->hw->block[index].name))
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "%*s\t", (index - 1) * 2,
+ rvu->hw->block[index].name);
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+			pcifunc = pf << RVU_PFVF_PF_SHIFT | vf;
+ if (!pcifunc)
+ continue;
+
+ if (vf) {
+ go_back = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "PF%d:VF%d\t\t", pf,
+ vf - 1);
+ } else {
+ go_back = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "PF%d\t\t", pf);
+ }
+
+ off += go_back;
+ for (index = 0; index < BLKTYPE_MAX; index++) {
+ block = rvu->hw->block[index];
+ if (!strlen(block.name))
+ continue;
+ off_prev = off;
+ for (lf = 0; lf < block.lf.max; lf++) {
+ if (block.fn_map[lf] != pcifunc)
+ continue;
+ flag = 1;
+ off += scnprintf(&buf[off], buf_size - 1
+ - off, "%3d,", lf);
+ }
+ if (flag && off_prev != off)
+ off--;
+ else
+ go_back++;
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\t");
+ }
+ if (!flag)
+ off -= go_back;
+ else
+ flag = 0;
+ off--;
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ }
+ }
+
+ bytes_not_copied = copy_to_user(buffer, buf, off);
+ kfree(buf);
+
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos = off;
+ return off;
+}
+
+RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
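+
+/* With debugfs mounted (typically at /sys/kernel/debug), the table can
+ * be dumped with:
+ *
+ *	cat /sys/kernel/debug/octeontx2/rsrc_alloc
+ *
+ * Each row lists a PF or PF:VF and the LF indices attached to it per
+ * RVU block.
+ */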
+
+static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf,
+ u16 *pcifunc)
+{
+ struct rvu_block *block;
+ struct rvu_hwinfo *hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
+ if (blkaddr < 0) {
+ dev_warn(rvu->dev, "Invalid blktype\n");
+ return false;
+ }
+
+ hw = rvu->hw;
+ block = &hw->block[blkaddr];
+
+ if (lf < 0 || lf >= block->lf.max) {
+ dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
+ block->lf.max - 1);
+ return false;
+ }
+
+ *pcifunc = block->fn_map[lf];
+ if (!*pcifunc) {
+ dev_warn(rvu->dev,
+ "This LF is not attached to any RVU PFFUNC\n");
+ return false;
+ }
+ return true;
+}
+
+static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
+{
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ if (!pfvf->aura_ctx) {
+ seq_puts(m, "Aura context is not initialized\n");
+ } else {
+ bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
+ pfvf->aura_ctx->qsize);
+ seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
+ seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
+ }
+
+ if (!pfvf->pool_ctx) {
+ seq_puts(m, "Pool context is not initialized\n");
+ } else {
+ bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
+ pfvf->pool_ctx->qsize);
+ seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
+ seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
+ }
+ kfree(buf);
+}
+
+/* The 'qsize' entry dumps current Aura/Pool context Qsize
+ * and each context's current enable/disable status in a bitmap.
+ */
+static int rvu_dbg_qsize_display(struct seq_file *filp, void *unused,
+ int blktype)
+{
+ void (*print_qsize)(struct seq_file *filp,
+ struct rvu_pfvf *pfvf) = NULL;
+ struct rvu_pfvf *pfvf;
+ struct rvu *rvu;
+ int qsize_id;
+ u16 pcifunc;
+
+ rvu = filp->private;
+ switch (blktype) {
+ case BLKTYPE_NPA:
+ qsize_id = rvu->rvu_dbg.npa_qsize_id;
+ print_qsize = print_npa_qsize;
+ break;
+
+ case BLKTYPE_NIX:
+ qsize_id = rvu->rvu_dbg.nix_qsize_id;
+ print_qsize = print_nix_qsize;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ print_qsize(filp, pfvf);
+
+ return 0;
+}
+
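+/* A write selects which LF the 'qsize' entry reports on, for example
+ * (assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *	echo 0 > /sys/kernel/debug/octeontx2/npa/qsize
+ *	cat /sys/kernel/debug/octeontx2/npa/qsize
+ *
+ * Writing "help" (or a malformed value) prints the expected format to
+ * the kernel log.
+ */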
+static ssize_t rvu_dbg_qsize_write(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos, int blktype)
+{
+ char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
+ struct seq_file *seqfile = filp->private_data;
+ char *cmd_buf, *cmd_buf_tmp, *subtoken;
+ struct rvu *rvu = seqfile->private;
+ u16 pcifunc;
+ int ret, lf;
+
+	/* memdup_user_nul() copies count bytes and appends a NUL, so the
+	 * command string is terminated without writing past the allocation.
+	 */
+	cmd_buf = memdup_user_nul(buffer, count);
+	if (IS_ERR(cmd_buf))
+		return PTR_ERR(cmd_buf);
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ cmd_buf_tmp = cmd_buf;
+ subtoken = strsep(&cmd_buf, " ");
+ ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
+ if (cmd_buf)
+ ret = -EINVAL;
+
+	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
+		dev_info(rvu->dev, "Use echo <%s-lf> > qsize\n", blk_string);
+ goto qsize_write_done;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) {
+ ret = -EINVAL;
+ goto qsize_write_done;
+ }
+ if (blktype == BLKTYPE_NPA)
+ rvu->rvu_dbg.npa_qsize_id = lf;
+ else
+ rvu->rvu_dbg.nix_qsize_id = lf;
+
+qsize_write_done:
+ kfree(cmd_buf_tmp);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_qsize_write(filp, buffer, count, ppos,
+ BLKTYPE_NPA);
+}
+
+static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
+
+/* Dumps given NPA Aura's context */
+static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
+{
+ struct npa_aura_s *aura = &rsp->aura;
+
+ seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
+
+ seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
+ aura->ena, aura->pool_caching);
+ seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
+ aura->pool_way_mask, aura->avg_con);
+ seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
+ aura->pool_drop_ena, aura->aura_drop_ena);
+ seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
+ aura->bp_ena, aura->aura_drop);
+ seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
+ aura->shift, aura->avg_level);
+
+ seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
+ (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
+
+ seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
+ (u64)aura->limit, aura->bp, aura->fc_ena);
+ seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
+ aura->fc_up_crossing, aura->fc_stype);
+ seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
+
+ seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
+
+ seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
+ aura->pool_drop, aura->update_time);
+ seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
+ aura->err_int, aura->err_int_ena);
+ seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
+ aura->thresh_int, aura->thresh_int_ena);
+ seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
+ aura->thresh_up, aura->thresh_qint_idx);
+ seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
+
+ seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
+}
+
+/* Dumps given NPA Pool's context */
+static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
+{
+ struct npa_pool_s *pool = &rsp->pool;
+
+ seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
+
+ seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
+ pool->ena, pool->nat_align);
+ seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
+ pool->stack_caching, pool->stack_way_mask);
+ seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
+ pool->buf_offset, pool->buf_size);
+
+ seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
+ pool->stack_max_pages, pool->stack_pages);
+
+ seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
+
+ seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
+ pool->stack_offset, pool->shift, pool->avg_level);
+ seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
+ pool->avg_con, pool->fc_ena, pool->fc_stype);
+ seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
+ pool->fc_hyst_bits, pool->fc_up_crossing);
+ seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
+
+ seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
+
+ seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
+
+ seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
+
+ seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
+ pool->err_int, pool->err_int_ena);
+ seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
+ seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
+ pool->thresh_int_ena, pool->thresh_up);
+ seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
+ pool->thresh_qint_idx, pool->err_qint_idx);
+}
+
+/* Reads aura/pool's ctx from admin queue */
+static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
+{
+ void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
+ struct npa_aq_enq_req aq_req;
+ struct npa_aq_enq_rsp rsp;
+ struct rvu_pfvf *pfvf;
+ int aura, rc, max_id;
+ int npalf, id, all;
+ struct rvu *rvu;
+ u16 pcifunc;
+
+ rvu = m->private;
+
+ switch (ctype) {
+ case NPA_AQ_CTYPE_AURA:
+ npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
+ id = rvu->rvu_dbg.npa_aura_ctx.id;
+ all = rvu->rvu_dbg.npa_aura_ctx.all;
+ break;
+
+ case NPA_AQ_CTYPE_POOL:
+ npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
+ id = rvu->rvu_dbg.npa_pool_ctx.id;
+ all = rvu->rvu_dbg.npa_pool_ctx.all;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
+ seq_puts(m, "Aura context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
+ seq_puts(m, "Pool context is not initialized\n");
+ return -EINVAL;
+ }
+
+ memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = ctype;
+ aq_req.op = NPA_AQ_INSTOP_READ;
+ if (ctype == NPA_AQ_CTYPE_AURA) {
+ max_id = pfvf->aura_ctx->qsize;
+ print_npa_ctx = print_npa_aura_ctx;
+ } else {
+ max_id = pfvf->pool_ctx->qsize;
+ print_npa_ctx = print_npa_pool_ctx;
+ }
+
+ if (id < 0 || id >= max_id) {
+ seq_printf(m, "Invalid %s, valid range is 0-%d\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
+ max_id - 1);
+ return -EINVAL;
+ }
+
+ if (all)
+ id = 0;
+ else
+ max_id = id + 1;
+
+ for (aura = id; aura < max_id; aura++) {
+ aq_req.aura_id = aura;
+ seq_printf(m, "======%s : %d=======\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
+ aq_req.aura_id);
+ rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
+ if (rc) {
+ seq_puts(m, "Failed to read context\n");
+ return -EINVAL;
+ }
+ print_npa_ctx(m, &rsp);
+ }
+ return 0;
+}
+
+static int write_npa_ctx(struct rvu *rvu, bool all,
+ int npalf, int id, int ctype)
+{
+ struct rvu_pfvf *pfvf;
+ int max_id = 0;
+ u16 pcifunc;
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (ctype == NPA_AQ_CTYPE_AURA) {
+ if (!pfvf->aura_ctx) {
+ dev_warn(rvu->dev, "Aura context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->aura_ctx->qsize;
+ } else if (ctype == NPA_AQ_CTYPE_POOL) {
+ if (!pfvf->pool_ctx) {
+ dev_warn(rvu->dev, "Pool context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->pool_ctx->qsize;
+ }
+
+ if (id < 0 || id >= max_id) {
+ dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
+ max_id - 1);
+ return -EINVAL;
+ }
+
+ switch (ctype) {
+ case NPA_AQ_CTYPE_AURA:
+ rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
+ rvu->rvu_dbg.npa_aura_ctx.id = id;
+ rvu->rvu_dbg.npa_aura_ctx.all = all;
+ break;
+
+ case NPA_AQ_CTYPE_POOL:
+ rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
+ rvu->rvu_dbg.npa_pool_ctx.id = id;
+ rvu->rvu_dbg.npa_pool_ctx.all = all;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
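+/* Parse a context selection command of the form "<lf> <id>" or
+ * "<lf> all", e.g. "0 5" selects context 5 of LF 0 and "0 all" selects
+ * every context of that LF. Trailing tokens are rejected with -EINVAL.
+ */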
+static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
+ const char __user *buffer, int *npalf,
+ int *id, bool *all)
+{
+ int bytes_not_copied;
+ char *cmd_buf_tmp;
+ char *subtoken;
+ int ret;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ *count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken && strcmp(subtoken, "all") == 0) {
+ *all = true;
+ } else {
+ ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ }
+ if (cmd_buf)
+ return -EINVAL;
+ return ret;
+}
+
+static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos, int ctype)
+{
+ char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
+ "aura" : "pool";
+ struct seq_file *seqfp = filp->private_data;
+ struct rvu *rvu = seqfp->private;
+ int npalf, id = 0, ret;
+ bool all = false;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+		return -ENOMEM;
+ ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
+ &npalf, &id, &all);
+ if (ret < 0) {
+ dev_info(rvu->dev,
+ "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
+ ctype_string, ctype_string);
+ goto done;
+ } else {
+ ret = write_npa_ctx(rvu, all, npalf, id, ctype);
+ }
+done:
+ kfree(cmd_buf);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
+ NPA_AQ_CTYPE_AURA);
+}
+
+static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
+
+static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
+ NPA_AQ_CTYPE_POOL);
+}
+
+static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
+
+static void ndc_cache_stats(struct seq_file *s, int blk_addr,
+ int ctype, int transaction)
+{
+ u64 req, out_req, lat, cant_alloc;
+ struct rvu *rvu = s->private;
+ int port;
+
+ for (port = 0; port < NDC_MAX_PORT; port++) {
+ req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
+ (port, ctype, transaction));
+ lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
+ (port, ctype, transaction));
+ out_req = rvu_read64(rvu, blk_addr,
+ NDC_AF_PORTX_RTX_RWX_OSTDN_PC
+ (port, ctype, transaction));
+ cant_alloc = rvu_read64(rvu, blk_addr,
+ NDC_AF_PORTX_RTX_CANT_ALLOC_PC
+ (port, transaction));
+ seq_printf(s, "\nPort:%d\n", port);
+ seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
+ seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
+ seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
+ seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
+ seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
+ }
+}
+
+static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
+{
+ seq_puts(s, "\n***** CACHE mode read stats *****\n");
+ ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
+ seq_puts(s, "\n***** CACHE mode write stats *****\n");
+ ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
+ seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
+ ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
+ seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
+ ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
+ return 0;
+}
+
+static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
+{
+ return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
+
+static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
+{
+ struct rvu *rvu = s->private;
+ int bank, max_bank;
+
+ max_bank = NDC_MAX_BANK(rvu, blk_addr);
+ for (bank = 0; bank < max_bank; bank++) {
+ seq_printf(s, "BANK:%d\n", bank);
+ seq_printf(s, "\tHits:\t%lld\n",
+ (u64)rvu_read64(rvu, blk_addr,
+ NDC_AF_BANKX_HIT_PC(bank)));
+ seq_printf(s, "\tMiss:\t%lld\n",
+ (u64)rvu_read64(rvu, blk_addr,
+ NDC_AF_BANKX_MISS_PC(bank)));
+ }
+ return 0;
+}
+
+static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
+{
+ return ndc_blk_cache_stats(filp, NIX0_RX,
+ BLKADDR_NDC_NIX0_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
+
+static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
+{
+ return ndc_blk_cache_stats(filp, NIX0_TX,
+ BLKADDR_NDC_NIX0_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
+
+static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
+
+static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ return ndc_blk_hits_miss_stats(filp,
+ NPA0_U, BLKADDR_NDC_NIX0_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
+
+static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ return ndc_blk_hits_miss_stats(filp,
+ NPA0_U, BLKADDR_NDC_NIX0_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
+
+/* Dumps given nix_sq's context */
+static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
+
+ seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
+ sq_ctx->sqe_way_mask, sq_ctx->cq);
+ seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
+ sq_ctx->sdp_mcast, sq_ctx->substream);
+ seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
+ sq_ctx->qint_idx, sq_ctx->ena);
+
+ seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
+ sq_ctx->sqb_count, sq_ctx->default_chan);
+ seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
+ sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
+ seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
+ sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
+
+ seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
+ sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
+ seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
+ sq_ctx->sq_int, sq_ctx->sqb_aura);
+ seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
+
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
+ seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
+ sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
+ seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
+ sq_ctx->smenq_offset, sq_ctx->tail_offset);
+ seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
+ sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
+ seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
+ sq_ctx->mnq_dis, sq_ctx->lmt_dis);
+ seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
+ sq_ctx->cq_limit, sq_ctx->max_sqe_size);
+
+ seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+ seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+ seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+ seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+ sq_ctx->smenq_next_sqb);
+
+ seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+ seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
+ sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
+ seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
+ sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
+ seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
+ sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
+ seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
+
+ seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+ (u64)sq_ctx->scm_lso_rem);
+ seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+ seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+ seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_octs);
+ seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_pkts);
+}
+
+/* Dumps given nix_rq's context */
+static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
+
+ seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
+ rq_ctx->wqe_aura, rq_ctx->substream);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
+ rq_ctx->cq, rq_ctx->ena_wqwd);
+ seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
+ rq_ctx->ipsech_ena, rq_ctx->sso_ena);
+ seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
+
+ seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
+ rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
+ seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
+ rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
+ seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
+ rq_ctx->pb_caching, rq_ctx->sso_tt);
+ seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
+ rq_ctx->sso_grp, rq_ctx->lpb_aura);
+ seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
+
+ seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
+ rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
+ seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
+ rq_ctx->xqe_imm_size, rq_ctx->later_skip);
+ seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
+ rq_ctx->first_skip, rq_ctx->lpb_sizem1);
+ seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
+ rq_ctx->spb_ena, rq_ctx->wqe_skip);
+ seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
+
+ seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
+ rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
+ seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
+ rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
+ seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
+ rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
+ seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
+ rq_ctx->xqe_pass, rq_ctx->xqe_drop);
+
+ seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
+ rq_ctx->qint_idx, rq_ctx->rq_int_ena);
+ seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
+ rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
+ seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
+ rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
+ seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
+
+ seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
+ rq_ctx->flow_tagw, rq_ctx->bad_utag);
+ seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
+ rq_ctx->good_utag, rq_ctx->ltag);
+
+ seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
+ seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
+ seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
+ seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
+ seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
+}
+
+/* Dumps given nix_cq's context */
+static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
+
+ seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
+
+ seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
+ seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
+ cq_ctx->avg_con, cq_ctx->cint_idx);
+ seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
+ cq_ctx->cq_err, cq_ctx->qint_idx);
+ seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
+ cq_ctx->bpid, cq_ctx->bp_ena);
+
+ seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
+ cq_ctx->update_time, cq_ctx->avg_level);
+ seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
+ cq_ctx->head, cq_ctx->tail);
+
+ seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
+ cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
+ seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
+ cq_ctx->qsize, cq_ctx->caching);
+ seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
+ cq_ctx->substream, cq_ctx->ena);
+ seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
+ cq_ctx->drop_ena, cq_ctx->drop);
+ seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
+}
+
+static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
+ void *unused, int ctype)
+{
+ void (*print_nix_ctx)(struct seq_file *filp,
+ struct nix_aq_enq_rsp *rsp) = NULL;
+ struct rvu *rvu = filp->private;
+ struct nix_aq_enq_req aq_req;
+ struct nix_aq_enq_rsp rsp;
+ char *ctype_string = NULL;
+ int qidx, rc, max_id = 0;
+ struct rvu_pfvf *pfvf;
+ int nixlf, id, all;
+ u16 pcifunc;
+
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
+ id = rvu->rvu_dbg.nix_cq_ctx.id;
+ all = rvu->rvu_dbg.nix_cq_ctx.all;
+ break;
+
+ case NIX_AQ_CTYPE_SQ:
+ nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
+ id = rvu->rvu_dbg.nix_sq_ctx.id;
+ all = rvu->rvu_dbg.nix_sq_ctx.all;
+ break;
+
+ case NIX_AQ_CTYPE_RQ:
+ nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
+ id = rvu->rvu_dbg.nix_rq_ctx.id;
+ all = rvu->rvu_dbg.nix_rq_ctx.all;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
+ seq_puts(filp, "SQ context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
+ seq_puts(filp, "RQ context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
+ seq_puts(filp, "CQ context is not initialized\n");
+ return -EINVAL;
+ }
+
+ if (ctype == NIX_AQ_CTYPE_SQ) {
+ max_id = pfvf->sq_ctx->qsize;
+ ctype_string = "sq";
+ print_nix_ctx = print_nix_sq_ctx;
+ } else if (ctype == NIX_AQ_CTYPE_RQ) {
+ max_id = pfvf->rq_ctx->qsize;
+ ctype_string = "rq";
+ print_nix_ctx = print_nix_rq_ctx;
+ } else if (ctype == NIX_AQ_CTYPE_CQ) {
+ max_id = pfvf->cq_ctx->qsize;
+ ctype_string = "cq";
+ print_nix_ctx = print_nix_cq_ctx;
+ }
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = ctype;
+ aq_req.op = NIX_AQ_INSTOP_READ;
+ if (all)
+ id = 0;
+ else
+ max_id = id + 1;
+ for (qidx = id; qidx < max_id; qidx++) {
+ aq_req.qidx = qidx;
+ seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
+ ctype_string, nixlf, aq_req.qidx);
+ rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
+ if (rc) {
+ seq_puts(filp, "Failed to read the context\n");
+ return -EINVAL;
+ }
+ print_nix_ctx(filp, &rsp);
+ }
+ return 0;
+}
+
+static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
+ int id, int ctype, char *ctype_string)
+{
+ struct rvu_pfvf *pfvf;
+ int max_id = 0;
+ u16 pcifunc;
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (ctype == NIX_AQ_CTYPE_SQ) {
+ if (!pfvf->sq_ctx) {
+ dev_warn(rvu->dev, "SQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->sq_ctx->qsize;
+ } else if (ctype == NIX_AQ_CTYPE_RQ) {
+ if (!pfvf->rq_ctx) {
+ dev_warn(rvu->dev, "RQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->rq_ctx->qsize;
+ } else if (ctype == NIX_AQ_CTYPE_CQ) {
+ if (!pfvf->cq_ctx) {
+ dev_warn(rvu->dev, "CQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->cq_ctx->qsize;
+ }
+
+ if (id < 0 || id >= max_id) {
+ dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
+ ctype_string, max_id - 1);
+ return -EINVAL;
+ }
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_cq_ctx.id = id;
+ rvu->rvu_dbg.nix_cq_ctx.all = all;
+ break;
+
+ case NIX_AQ_CTYPE_SQ:
+ rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_sq_ctx.id = id;
+ rvu->rvu_dbg.nix_sq_ctx.all = all;
+ break;
+
+ case NIX_AQ_CTYPE_RQ:
+ rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_rq_ctx.id = id;
+ rvu->rvu_dbg.nix_rq_ctx.all = all;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos,
+ int ctype)
+{
+ struct seq_file *m = filp->private_data;
+ struct rvu *rvu = m->private;
+ char *cmd_buf, *ctype_string;
+ int nixlf, id = 0, ret;
+ bool all = false;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ switch (ctype) {
+ case NIX_AQ_CTYPE_SQ:
+ ctype_string = "sq";
+ break;
+ case NIX_AQ_CTYPE_RQ:
+ ctype_string = "rq";
+ break;
+ case NIX_AQ_CTYPE_CQ:
+ ctype_string = "cq";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+	if (!cmd_buf)
+		return -ENOMEM;
+
+ ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
+ &nixlf, &id, &all);
+ if (ret < 0) {
+ dev_info(rvu->dev,
+ "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
+ ctype_string, ctype_string);
+ goto done;
+ } else {
+ ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
+ ctype_string);
+ }
+done:
+ kfree(cmd_buf);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_SQ);
+}
+
+static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
+
+static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_RQ);
+}
+
+static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
+
+static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_CQ);
+}
+
+static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
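+
+/* Example usage (assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *	echo "0 5" > /sys/kernel/debug/octeontx2/nix/sq_ctx
+ *	cat /sys/kernel/debug/octeontx2/nix/sq_ctx
+ *
+ * selects and dumps SQ 5 of NIX LF 0; "0 all" walks every SQ context.
+ * The rq_ctx and cq_ctx entries behave the same way.
+ */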
+
+static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
+ unsigned long *bmap, char *qtype)
+{
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ bitmap_print_to_pagebuf(false, buf, bmap, qsize);
+ seq_printf(filp, "%s context count : %d\n", qtype, qsize);
+ seq_printf(filp, "%s context ena/dis bitmap : %s\n",
+ qtype, buf);
+ kfree(buf);
+}
+
+static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
+{
+ if (!pfvf->cq_ctx)
+ seq_puts(filp, "cq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
+ "cq");
+
+ if (!pfvf->rq_ctx)
+ seq_puts(filp, "rq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
+ "rq");
+
+ if (!pfvf->sq_ctx)
+ seq_puts(filp, "sq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
+ "sq");
+}
+
+static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_qsize_write(filp, buffer, count, ppos,
+ BLKTYPE_NIX);
+}
+
+static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
+
+static void rvu_dbg_nix_init(struct rvu *rvu)
+{
+ const struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+
+ rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.nix) {
+ dev_err(rvu->dev, "create debugfs dir failed for nix\n");
+ return;
+ }
+
+ pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_sq_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_rq_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_cq_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_ndc_tx_cache_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_ndc_rx_cache_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix,
+ rvu, &rvu_dbg_nix_ndc_tx_hits_miss_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix,
+ rvu, &rvu_dbg_nix_ndc_rx_hits_miss_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_qsize_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir/file for NIX\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.nix);
+}
+
+static void rvu_dbg_npa_init(struct rvu *rvu)
+{
+ const struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+
+ rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.npa)
+ return;
+
+ pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_qsize_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_aura_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_pool_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_cache_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa,
+ rvu, &rvu_dbg_npa_ndc_hits_miss_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir/file for NPA\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.npa);
+}
+
+#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
+ ({ \
+ u64 cnt; \
+ err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
+ NIX_STATS_RX, &(cnt)); \
+ if (!err) \
+ seq_printf(s, "%s: %llu\n", name, cnt); \
+ cnt; \
+ })
+
+#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \
+ ({ \
+ u64 cnt; \
+ err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
+ NIX_STATS_TX, &(cnt)); \
+ if (!err) \
+ seq_printf(s, "%s: %llu\n", name, cnt); \
+ cnt; \
+ })
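+
+/* The macros above use GCC statement expressions so that they both
+ * print a counter and evaluate to its value; 'err', 's', 'cgxd' and
+ * 'lmac_id' are picked up from the calling scope.
+ */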
+
+static int cgx_print_stats(struct seq_file *s, int lmac_id)
+{
+ struct cgx_link_user_info linfo;
+ void *cgxd = s->private;
+ u64 ucast, mcast, bcast;
+ int stat = 0, err = 0;
+ u64 tx_stat, rx_stat;
+ struct rvu *rvu;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ /* Link status */
+ seq_puts(s, "\n=======Link Status======\n\n");
+ err = cgx_get_link_info(cgxd, lmac_id, &linfo);
+ if (err)
+ seq_puts(s, "Failed to read link status\n");
+ seq_printf(s, "\nLink is %s %d Mbps\n\n",
+ linfo.link_up ? "UP" : "DOWN", linfo.speed);
+
+ /* Rx stats */
+ seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n");
+ ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
+ if (err)
+ return err;
+ mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
+ if (err)
+ return err;
+ bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
+ if (err)
+ return err;
+ seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
+ if (err)
+ return err;
+
+ /* Tx stats */
+ seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n");
+ ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
+ if (err)
+ return err;
+ mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
+ if (err)
+ return err;
+ bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
+ if (err)
+ return err;
+ seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
+ PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
+ if (err)
+ return err;
+
+ /* Rx stats */
+ seq_puts(s, "\n=======CGX RX_STATS======\n\n");
+ while (stat < CGX_RX_STATS_COUNT) {
+ err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
+ if (err)
+ return err;
+ seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat);
+ stat++;
+ }
+
+ /* Tx stats */
+ stat = 0;
+ seq_puts(s, "\n=======CGX TX_STATS======\n\n");
+ while (stat < CGX_TX_STATS_COUNT) {
+ err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
+ if (err)
+ return err;
+ seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat);
+ stat++;
+ }
+
+ return err;
+}
+
+static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+{
+ struct dentry *current_dir;
+ int err, lmac_id;
+ char *buf;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ buf = strrchr(current_dir->d_name.name, 'c');
+ if (!buf)
+ return -EINVAL;
+
+ err = kstrtoint(buf + 1, 10, &lmac_id);
+ if (!err) {
+ err = cgx_print_stats(filp, lmac_id);
+ if (err)
+ return err;
+ }
+ return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
+
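+/* Builds a per-LMAC stats hierarchy, e.g.:
+ *
+ *	octeontx2/cgx/cgx0/lmac0/stats
+ *	octeontx2/cgx/cgx0/lmac1/stats
+ *
+ * The LMAC id is recovered from the parent directory name when the
+ * stats file is read (see rvu_dbg_cgx_stat_display() above).
+ */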
+static void rvu_dbg_cgx_init(struct rvu *rvu)
+{
+ const struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+ int i, lmac_id;
+ char dname[20];
+ void *cgx;
+
+ rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root);
+
+ for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
+ cgx = rvu_cgx_pdata(i, rvu);
+ if (!cgx)
+ continue;
+ /* cgx debugfs dir */
+ sprintf(dname, "cgx%d", i);
+ rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
+ rvu->rvu_dbg.cgx_root);
+ for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) {
+ /* lmac debugfs dir */
+ sprintf(dname, "lmac%d", lmac_id);
+ rvu->rvu_dbg.lmac =
+ debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
+
+ pfile = debugfs_create_file("stats", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_stat_fops);
+ if (!pfile)
+ goto create_failed;
+ }
+ }
+ return;
+
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir/file for CGX\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.cgx_root);
+}
+
+/* NPC debugfs APIs */
+static void rvu_print_npc_mcam_info(struct seq_file *s,
+ u16 pcifunc, int blkaddr)
+{
+ struct rvu *rvu = s->private;
+ int entry_acnt, entry_ecnt;
+ int cntr_acnt, cntr_ecnt;
+
+ /* Skip PF0 */
+ if (!pcifunc)
+ return;
+ rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
+ &entry_acnt, &entry_ecnt);
+ rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
+ &cntr_acnt, &cntr_ecnt);
+ if (!entry_acnt && !cntr_acnt)
+ return;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
+ rvu_get_pf(pcifunc));
+ else
+ seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
+ rvu_get_pf(pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+
+ if (entry_acnt) {
+ seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
+ seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
+ }
+ if (cntr_acnt) {
+ seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
+ seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
+ }
+}
+
+static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ int pf, vf, numvfs, blkaddr;
+ struct npc_mcam *mcam;
+ u16 pcifunc;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ mcam = &rvu->hw->mcam;
+
+ seq_puts(filp, "\nNPC MCAM info:\n");
+ /* MCAM keywidth on receive and transmit sides */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
+ cfg = (cfg >> 32) & 0x07;
+ seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
+ "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
+ "224bits" : "448bits"));
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
+ cfg = (cfg >> 32) & 0x07;
+ seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
+ "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
+ "224bits" : "448bits"));
+
+ mutex_lock(&mcam->lock);
+ /* MCAM entries */
+ seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
+ seq_printf(filp, "\t\t Reserved \t: %d\n",
+ mcam->total_entries - mcam->bmap_entries);
+ seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
+
+ /* MCAM counters */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ cfg = (cfg >> 48) & 0xFFFF;
+ seq_printf(filp, "\n\t\t MCAM counters \t: %lld\n", cfg);
+ seq_printf(filp, "\t\t Reserved \t: %lld\n", cfg - mcam->counters.max);
+ seq_printf(filp, "\t\t Available \t: %d\n",
+ rvu_rsrc_free_count(&mcam->counters));
+
+ if (mcam->bmap_entries == mcam->bmap_fcnt) {
+ mutex_unlock(&mcam->lock);
+ return 0;
+ }
+
+ seq_puts(filp, "\n\t\t Current allocation\n");
+ seq_puts(filp, "\t\t====================\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
+
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ numvfs = (cfg >> 12) & 0xFF;
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
+ rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
+
+static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
+ void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct npc_mcam *mcam;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ mcam = &rvu->hw->mcam;
+
+ seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
+ seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
+
+static void rvu_dbg_npc_init(struct rvu *rvu)
+{
+ const struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+
+ rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.npc)
+ return;
+
+ pfile = debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc,
+ rvu, &rvu_dbg_npc_mcam_info_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
+ rvu, &rvu_dbg_npc_rx_miss_act_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir/file for NPC\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.npc);
+}
+
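+/* Top-level debugfs layout created below:
+ *
+ *	octeontx2/rsrc_alloc
+ *	octeontx2/npa/{qsize, aura_ctx, pool_ctx, ndc_cache, ndc_hits_miss}
+ *	octeontx2/nix/{sq_ctx, rq_ctx, cq_ctx, qsize, ndc_* entries}
+ *	octeontx2/cgx/cgx<N>/lmac<N>/stats
+ *	octeontx2/npc/{mcam_info, rx_miss_act_stats}
+ */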
+void rvu_dbg_init(struct rvu *rvu)
+{
+ struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+
+ rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
+ if (!rvu->rvu_dbg.root) {
+ dev_err(rvu->dev, "%s failed\n", __func__);
+ return;
+ }
+ pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
+ &rvu_dbg_rsrc_status_fops);
+ if (!pfile)
+ goto create_failed;
+
+ rvu_dbg_npa_init(rvu);
+ rvu_dbg_nix_init(rvu);
+ rvu_dbg_cgx_init(rvu);
+ rvu_dbg_npc_init(rvu);
+
+ return;
+
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.root);
+}
+
+void rvu_dbg_exit(struct rvu *rvu)
+{
+ debugfs_remove_recursive(rvu->rvu_dbg.root);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 4a7609fd6dd0..8a59f7d53fbf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -64,7 +64,6 @@ enum nix_makr_fmt_indexes {
struct mce {
struct hlist_node node;
- u16 idx;
u16 pcifunc;
};
@@ -127,17 +126,12 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr)
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
if (err)
dev_err(rvu->dev, "NIX RX software sync failed\n");
-
- /* As per a HW errata in 9xxx A0 silicon, HW may clear SW_SYNC[ENA]
- * bit too early. Hence wait for 50us more.
- */
- if (is_rvu_9xxx_A0(rvu))
- usleep_range(50, 60);
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
int lvl, u16 pcifunc, u16 schq)
{
+ struct rvu_hwinfo *hw = rvu->hw;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u16 map_func;
@@ -155,13 +149,15 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
mutex_unlock(&rvu->rsrc_lock);
- /* For TL1 schq, sharing across VF's of same PF is ok */
- if (lvl == NIX_TXSCH_LVL_TL1 &&
- rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
- return false;
+	/* TLs aggregating traffic are shared across PF and VFs */
+	if (lvl >= hw->cap.nix_tx_aggr_lvl)
+		return rvu_get_pf(map_func) == rvu_get_pf(pcifunc);
- if (lvl != NIX_TXSCH_LVL_TL1 &&
- map_func != pcifunc)
+ if (map_func != pcifunc)
return false;
return true;
@@ -198,6 +194,11 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+
+ /* Note that AF's VFs work in pairs and talk over consecutive
+	 * loopback channels. Therefore, if an odd number of AF VFs are
+	 * enabled, the last VF is left without a pair.
+ */
pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
NIX_CHAN_LBK_CHX(0, vf + 1);
@@ -382,7 +383,8 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
struct rvu_pfvf *pfvf, int nixlf,
- int rss_sz, int rss_grps, int hwctx_size)
+ int rss_sz, int rss_grps, int hwctx_size,
+ u64 way_mask)
{
int err, grp, num_indices;
@@ -402,7 +404,8 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
/* Config full RSS table size, enable RSS and caching */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
BIT_ULL(36) | BIT_ULL(4) |
- ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
+ ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
+ way_mask << 20);
/* Config RSS group offset and sizes */
for (grp = 0; grp < rss_grps; grp++)
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
@@ -663,6 +666,21 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
return 0;
}
+static const char *nix_get_ctx_name(int ctype)
+{
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ return "CQ";
+ case NIX_AQ_CTYPE_SQ:
+ return "SQ";
+ case NIX_AQ_CTYPE_RQ:
+ return "RQ";
+ case NIX_AQ_CTYPE_RSS:
+ return "RSS";
+ }
+ return "";
+}
+
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
@@ -707,21 +725,60 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
if (rc) {
err = rc;
dev_err(rvu->dev, "Failed to disable %s:%d context\n",
- (req->ctype == NIX_AQ_CTYPE_CQ) ?
- "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
- "RQ" : "SQ"), qidx);
+ nix_get_ctx_name(req->ctype), qidx);
}
}
return err;
}
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
+{
+ struct nix_aq_enq_req lock_ctx_req;
+ int err;
+
+ if (req->op != NIX_AQ_INSTOP_INIT)
+ return 0;
+
+ if (req->ctype == NIX_AQ_CTYPE_MCE ||
+ req->ctype == NIX_AQ_CTYPE_DYNO)
+ return 0;
+
+ memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
+ lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+ lock_ctx_req.ctype = req->ctype;
+ lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
+ lock_ctx_req.qidx = req->qidx;
+ err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+ if (err)
+ dev_err(rvu->dev,
+ "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
+ req->hdr.pcifunc,
+ nix_get_ctx_name(req->ctype), req->qidx);
+ return err;
+}
+
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ int err;
+
+ err = rvu_nix_aq_enq_inst(rvu, req, rsp);
+ if (!err)
+ err = nix_lf_hwctx_lockdown(rvu, req);
+ return err;
+}
+#else
+
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
+#endif
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -745,6 +802,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
return NIX_AF_ERR_PARAM;
+ if (req->way_mask)
+ req->way_mask &= 0xFFFF;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
@@ -810,7 +870,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
(u64)pfvf->rq_ctx->iova);
/* Set caching and queue count in HW */
- cfg = BIT_ULL(36) | (req->rq_cnt - 1);
+ cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
/* Alloc NIX SQ HW context memory and config the base */
@@ -825,7 +885,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
(u64)pfvf->sq_ctx->iova);
- cfg = BIT_ULL(36) | (req->sq_cnt - 1);
+
+ cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
/* Alloc NIX CQ HW context memory and config the base */
@@ -840,13 +901,14 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
(u64)pfvf->cq_ctx->iova);
- cfg = BIT_ULL(36) | (req->cq_cnt - 1);
+
+ cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
/* Initialize receive side scaling (RSS) */
hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
- err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
- req->rss_sz, req->rss_grps, hwctx_size);
+ err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
+ req->rss_grps, hwctx_size, req->way_mask);
if (err)
goto free_mem;
@@ -860,7 +922,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
(u64)pfvf->cq_ints_ctx->iova);
- rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
+ BIT_ULL(36) | req->way_mask << 20);
/* Alloc memory for QINT's HW contexts */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
@@ -872,7 +936,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
(u64)pfvf->nix_qints_ctx->iova);
- rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
+ BIT_ULL(36) | req->way_mask << 20);
/* Setup VLANX TPID's.
* Use VLAN1 for 802.1Q
@@ -1048,6 +1113,9 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
struct rvu_hwinfo *hw = rvu->hw;
int link;
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
+ return;
+
/* Reset TL4's SDP link config */
if (lvl == NIX_TXSCH_LVL_TL4)
rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
@@ -1061,83 +1129,185 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
-static int
-rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc,
- u16 *schq_list, u16 *schq_cnt)
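+/* Map a PF_FUNC to the NIX transmit link it sends on: AF VFs use the
+ * LBK link that follows all CGX links, CGX mapped PFs use
+ * (cgx_id * lmacs-per-cgx + lmac_id), and everything else falls through
+ * to the SDP link after the CGX and LBK links.
+ */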
+static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
- struct nix_txsch *txsch;
- struct nix_hw *nix_hw;
- struct rvu_pfvf *pfvf;
- u8 cgx_id, lmac_id;
- u16 schq_base;
- u32 *pfvf_map;
- int pf, intf;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
- nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
- return -ENODEV;
+ if (is_afvf(pcifunc)) {/* LBK links */
+ return hw->cgx_links;
+ } else if (is_pf_cgxmapped(rvu, pf)) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return (cgx_id * hw->lmac_per_cgx) + lmac_id;
+ }
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
- pfvf_map = txsch->pfvf_map;
- pf = rvu_get_pf(pcifunc);
+ /* SDP link */
+ return hw->cgx_links + hw->lbk_links;
+}
- /* static allocation as two TL1's per link */
- intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
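+/* For fixed txschq mapping, compute the [start, end) scheduler queue
+ * range owned by the given transmit link: CGX and LBK links get
+ * per-link blocks sized by the nix_txsch_per_*_lmac capabilities, and
+ * the SDP link gets the range after all CGX and LBK queues.
+ */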
+static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
+ int link, int *start, int *end)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
- switch (intf) {
- case NIX_INTF_TYPE_CGX:
- rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
- schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
- break;
- case NIX_INTF_TYPE_LBK:
- schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
- break;
- default:
- return -ENODEV;
+ if (is_afvf(pcifunc)) { /* LBK links */
+ *start = hw->cap.nix_txsch_per_cgx_lmac * link;
+ *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
+ } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
+ *start = hw->cap.nix_txsch_per_cgx_lmac * link;
+ *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
+ } else { /* SDP link */
+ *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
+ (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
+ *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
}
+}
- if (schq_base + 1 > txsch->schq.max)
- return -ENODEV;
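+/* Validate a txschq alloc request for one scheduler level: traffic
+ * aggregating levels may only ask for a single queue; for other levels
+ * ensure enough free queues exist (a single fixed slot per PF_FUNC when
+ * nix_fixed_txschq_mapping is set) and that any contiguous run
+ * requested is actually available.
+ */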
+static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
+ struct nix_hw *nix_hw,
+ struct nix_txsch_alloc_req *req)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int schq, req_schq, free_cnt;
+ struct nix_txsch *txsch;
+ int link, start, end;
- /* init pfvf_map as we store flags */
- if (pfvf_map[schq_base] == U32_MAX) {
- pfvf_map[schq_base] =
- TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
- pfvf_map[schq_base + 1] =
- TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+ txsch = &nix_hw->txsch[lvl];
+ req_schq = req->schq_contig[lvl] + req->schq[lvl];
- /* Onetime reset for TL1 */
- nix_reset_tx_linkcfg(rvu, blkaddr,
- NIX_TXSCH_LVL_TL1, schq_base);
- nix_reset_tx_shaping(rvu, blkaddr,
- NIX_TXSCH_LVL_TL1, schq_base);
+ if (!req_schq)
+ return 0;
- nix_reset_tx_linkcfg(rvu, blkaddr,
- NIX_TXSCH_LVL_TL1, schq_base + 1);
- nix_reset_tx_shaping(rvu, blkaddr,
- NIX_TXSCH_LVL_TL1, schq_base + 1);
+ link = nix_get_tx_link(rvu, pcifunc);
+
+ /* For traffic aggregating scheduler level, one queue is enough */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ if (req_schq != 1)
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+ return 0;
}
- if (schq_list && schq_cnt) {
- schq_list[0] = schq_base;
- schq_list[1] = schq_base + 1;
- *schq_cnt = 2;
+ /* Get free SCHQ count and check if request can be accommodated */
+ if (hw->cap.nix_fixed_txschq_mapping) {
+ nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
+ schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
+ if (end <= txsch->schq.max && schq < end &&
+ !test_bit(schq, txsch->schq.bmap))
+ free_cnt = 1;
+ else
+ free_cnt = 0;
+ } else {
+ free_cnt = rvu_rsrc_free_count(&txsch->schq);
}
+ if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+
+ /* If contiguous queues are needed, check for availability */
+ if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
+ !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+
return 0;
}
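
The fixed-mapping branch above grants each PF_FUNC exactly one
predetermined slot within its link's range. A small sketch, with
FUNC_MASK standing in for RVU_PFVF_FUNC_MASK:

    #include <stdio.h>

    #define FUNC_MASK 0x3ff            /* stand-in for RVU_PFVF_FUNC_MASK */

    /* 1 free slot if this function's fixed queue lies inside the range
     * and is not already taken, else 0, mirroring the check above.
     */
    static int fixed_free_cnt(const unsigned char *bmap, int max,
                              int start, int end, unsigned pcifunc)
    {
            int schq = start + (pcifunc & FUNC_MASK);

            return (end <= max && schq < end && !bmap[schq]) ? 1 : 0;
    }

    int main(void)
    {
            unsigned char bmap[64] = { 0 };

            bmap[18] = 1;                           /* VF2's slot taken */
            printf("VF1 free=%d\n", fixed_free_cnt(bmap, 64, 16, 20, 1));
            printf("VF2 free=%d\n", fixed_free_cnt(bmap, 64, 16, 20, 2));
            return 0;
    }
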
+static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
+ struct nix_txsch_alloc_rsp *rsp,
+ int lvl, int start, int end)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = rsp->hdr.pcifunc;
+ int idx, schq;
+
+ /* For traffic aggregating levels, queue alloc is based
+ * on the transmit link to which the PF_FUNC is mapped.
+ */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ /* A single TL queue is allocated */
+ if (rsp->schq_contig[lvl]) {
+ rsp->schq_contig[lvl] = 1;
+ rsp->schq_contig_list[lvl][0] = start;
+ }
+
+ /* Both contig and non-contig reqs don't make sense here */
+ if (rsp->schq_contig[lvl])
+ rsp->schq[lvl] = 0;
+
+ if (rsp->schq[lvl]) {
+ rsp->schq[lvl] = 1;
+ rsp->schq_list[lvl][0] = start;
+ }
+ return;
+ }
+
+ /* Adjust the queue request count if HW supports
+ * only one queue per function at this level.
+ */
+ if (hw->cap.nix_fixed_txschq_mapping) {
+ idx = pcifunc & RVU_PFVF_FUNC_MASK;
+ schq = start + idx;
+ if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
+ rsp->schq_contig[lvl] = 0;
+ rsp->schq[lvl] = 0;
+ return;
+ }
+
+ if (rsp->schq_contig[lvl]) {
+ rsp->schq_contig[lvl] = 1;
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_contig_list[lvl][0] = schq;
+ rsp->schq[lvl] = 0;
+ } else if (rsp->schq[lvl]) {
+ rsp->schq[lvl] = 1;
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_list[lvl][0] = schq;
+ }
+ return;
+ }
+
+ /* Allocate the requested contiguous queue indices first */
+ if (rsp->schq_contig[lvl]) {
+ schq = bitmap_find_next_zero_area(txsch->schq.bmap,
+ txsch->schq.max, start,
+ rsp->schq_contig[lvl], 0);
+ if (schq >= end)
+ rsp->schq_contig[lvl] = 0;
+ for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_contig_list[lvl][idx] = schq;
+ schq++;
+ }
+ }
+
+ /* Allocate non-contiguous queue indices */
+ if (rsp->schq[lvl]) {
+ idx = 0;
+ for (schq = start; schq < end; schq++) {
+ if (!test_bit(schq, txsch->schq.bmap)) {
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_list[lvl][idx++] = schq;
+ }
+ if (idx == rsp->schq[lvl])
+ break;
+ }
+ /* Update how many were allocated */
+ rsp->schq[lvl] = idx;
+ }
+}
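
The non-fixed path above leans on bitmap_find_next_zero_area() for the
contiguous case; a simplified user-space equivalent (byte-per-bit bitmap,
not the kernel's implementation) looks like:

    #include <stdio.h>

    #define MAX_SCHQ 64
    static unsigned char bmap[MAX_SCHQ];      /* 1 = queue in use */

    static int find_zero_area(int start, int end, int nr)
    {
            int run = 0;

            for (int i = start; i < end; i++) {
                    run = bmap[i] ? 0 : run + 1;
                    if (run == nr)
                            return i - nr + 1; /* first index of the run */
            }
            return end;                        /* mimic "schq >= end" failure */
    }

    int main(void)
    {
            bmap[2] = bmap[3] = 1;
            printf("4 free queues from %d\n", find_zero_area(0, MAX_SCHQ, 4));
            return 0;
    }
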
+
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
struct nix_txsch_alloc_req *req,
struct nix_txsch_alloc_rsp *rsp)
{
+ struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
+ int link, blkaddr, rc = 0;
+ int lvl, idx, start, end;
struct nix_txsch *txsch;
- int lvl, idx, req_schq;
struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
- int blkaddr, rc = 0;
u32 *pfvf_map;
u16 schq;
@@ -1151,83 +1321,66 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
return -EINVAL;
mutex_lock(&rvu->rsrc_lock);
- for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- txsch = &nix_hw->txsch[lvl];
- req_schq = req->schq_contig[lvl] + req->schq[lvl];
- pfvf_map = txsch->pfvf_map;
-
- if (!req_schq)
- continue;
- /* There are only 28 TL1s */
- if (lvl == NIX_TXSCH_LVL_TL1) {
- if (req->schq_contig[lvl] ||
- req->schq[lvl] > 2 ||
- rvu_get_tl1_schqs(rvu, blkaddr,
- pcifunc, NULL, NULL))
- goto err;
- continue;
- }
-
- /* Check if request is valid */
- if (req_schq > MAX_TXSCHQ_PER_FUNC)
- goto err;
-
- /* If contiguous queues are needed, check for availability */
- if (req->schq_contig[lvl] &&
- !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
- goto err;
-
- /* Check if full request can be accommodated */
- if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
+ /* Check if request is valid as per HW capabilities
+ * and can be accommodated.
+ */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
+ if (rc)
goto err;
}
+ /* Allocate requested Tx scheduler queues */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
- rsp->schq_contig[lvl] = req->schq_contig[lvl];
pfvf_map = txsch->pfvf_map;
- rsp->schq[lvl] = req->schq[lvl];
if (!req->schq[lvl] && !req->schq_contig[lvl])
continue;
- /* Handle TL1 specially as it is
- * allocation is restricted to 2 TL1's
- * per link
- */
+ rsp->schq[lvl] = req->schq[lvl];
+ rsp->schq_contig[lvl] = req->schq_contig[lvl];
- if (lvl == NIX_TXSCH_LVL_TL1) {
- rsp->schq_contig[lvl] = 0;
- rvu_get_tl1_schqs(rvu, blkaddr, pcifunc,
- &rsp->schq_list[lvl][0],
- &rsp->schq[lvl]);
- continue;
+ link = nix_get_tx_link(rvu, pcifunc);
+
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ start = link;
+ end = link;
+ } else if (hw->cap.nix_fixed_txschq_mapping) {
+ nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
+ } else {
+ start = 0;
+ end = txsch->schq.max;
}
- /* Alloc contiguous queues first */
- if (req->schq_contig[lvl]) {
- schq = rvu_alloc_rsrc_contig(&txsch->schq,
- req->schq_contig[lvl]);
+ nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
- for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
+ /* Reset queue config */
+ for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
+ schq = rsp->schq_contig_list[lvl][idx];
+ if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
+ NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
- nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
- nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
- rsp->schq_contig_list[lvl][idx] = schq;
- schq++;
- }
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
}
- /* Alloc non-contiguous queues */
for (idx = 0; idx < req->schq[lvl]; idx++) {
- schq = rvu_alloc_rsrc(&txsch->schq);
- pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
+ schq = rsp->schq_list[lvl][idx];
+ if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
+ NIX_TXSCHQ_CFG_DONE))
+ pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
- rsp->schq_list[lvl][idx] = schq;
}
}
+
+ rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
+ rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
+ rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
+ NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
goto exit;
err:
rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
@@ -1236,13 +1389,50 @@ exit:
return rc;
}
+static void nix_smq_flush(struct rvu *rvu, int blkaddr,
+ int smq, u16 pcifunc, int nixlf)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
+ int err, restore_tx_en = 0;
+ u64 cfg;
+
+ /* enable cgx tx if disabled */
+ if (is_pf_cgxmapped(rvu, pf)) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
+ }
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ /* Do SMQ flush and set enqueue xoff */
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
+ /* Disable backpressure from physical link,
+ * otherwise SMQ flush may stall.
+ */
+ rvu_cgx_enadis_rx_bp(rvu, pf, false);
+
+ /* Wait for flush to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
+ if (err)
+ dev_err(rvu->dev,
+ "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
+
+ rvu_cgx_enadis_rx_bp(rvu, pf, true);
+ /* restore cgx tx state */
+ if (restore_tx_en)
+ cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+}
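
The flush handshake above boils down to: set XOFF and FLUSH (bits 50/49),
drop link backpressure, then poll until hardware clears the flush bit. A
self-contained sketch with a simulated register; the completion polarity
is an assumption read off the rvu_poll_reg() call above.

    #include <stdint.h>
    #include <stdio.h>

    #define SMQ_FLUSH (1ULL << 49)
    #define SMQ_XOFF  (1ULL << 50)

    static uint64_t smq_cfg;                 /* simulated register */

    static uint64_t reg_read(void)
    {
            smq_cfg &= ~SMQ_FLUSH;           /* pretend HW finished */
            return smq_cfg;
    }

    static int smq_flush(void)
    {
            smq_cfg |= SMQ_FLUSH | SMQ_XOFF; /* kick off flush, enqueue xoff */

            for (int retry = 0; retry < 1000; retry++)
                    if (!(reg_read() & SMQ_FLUSH))
                            return 0;        /* bit cleared: flush done */
            return -1;                       /* timeout -> dev_err path */
    }

    int main(void)
    {
            printf("flush rc=%d\n", smq_flush());
            return 0;
    }
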
+
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
int blkaddr, nixlf, lvl, schq, err;
struct rvu_hwinfo *hw = rvu->hw;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
- u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -1275,26 +1465,15 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
- /* Do SMQ flush and set enqueue xoff */
- cfg |= BIT_ULL(50) | BIT_ULL(49);
- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
-
- /* Wait for flush to complete */
- err = rvu_poll_reg(rvu, blkaddr,
- NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
- if (err) {
- dev_err(rvu->dev,
- "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
- }
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
}
/* Now free scheduler queues to free pool */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- /* Free all SCHQ's except TL1 as
- * TL1 is shared across all VF's for a RVU PF
- */
- if (lvl == NIX_TXSCH_LVL_TL1)
+ /* TLs above aggregation level are shared across all PF
+ * and its VFs, hence skip freeing them.
+ */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
continue;
txsch = &nix_hw->txsch[lvl];
@@ -1302,7 +1481,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
rvu_free_rsrc(&txsch->schq, schq);
- txsch->pfvf_map[schq] = 0;
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
}
}
mutex_unlock(&rvu->rsrc_lock);
@@ -1319,13 +1498,12 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
static int nix_txschq_free_one(struct rvu *rvu,
struct nix_txsch_free_req *req)
{
- int lvl, schq, nixlf, blkaddr, rc;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
+ int lvl, schq, nixlf, blkaddr;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u32 *pfvf_map;
- u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -1343,10 +1521,8 @@ static int nix_txschq_free_one(struct rvu *rvu,
schq = req->schq;
txsch = &nix_hw->txsch[lvl];
- /* Don't allow freeing TL1 */
- if (lvl > NIX_TXSCH_LVL_TL2 ||
- schq >= txsch->schq.max)
- goto err;
+ if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
+ return 0;
pfvf_map = txsch->pfvf_map;
mutex_lock(&rvu->rsrc_lock);
@@ -1359,24 +1535,12 @@ static int nix_txschq_free_one(struct rvu *rvu,
/* Flush if it is a SMQ. Onus of disabling
* TL2/3 queue links before SMQ flush is on user
*/
- if (lvl == NIX_TXSCH_LVL_SMQ) {
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
- /* Do SMQ flush and set enqueue xoff */
- cfg |= BIT_ULL(50) | BIT_ULL(49);
- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
-
- /* Wait for flush to complete */
- rc = rvu_poll_reg(rvu, blkaddr,
- NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
- if (rc) {
- dev_err(rvu->dev,
- "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
- }
- }
+ if (lvl == NIX_TXSCH_LVL_SMQ)
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
/* Free the resource */
rvu_free_rsrc(&txsch->schq, schq);
- txsch->pfvf_map[schq] = 0;
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
mutex_unlock(&rvu->rsrc_lock);
return 0;
err:
@@ -1393,8 +1557,8 @@ int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
return nix_txschq_free_one(rvu, req);
}
-static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
- int lvl, u64 reg, u64 regval)
+static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
+ int lvl, u64 reg, u64 regval)
{
u64 regbase = reg & 0xFFFF;
u16 schq, parent;
@@ -1431,79 +1595,82 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
return true;
}
-static int
-nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc)
+static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
{
- u16 schq_list[2], schq_cnt, schq;
- int blkaddr, idx, err = 0;
- u16 map_func, map_flags;
- struct nix_hw *nix_hw;
- u64 reg, regval;
- u32 *pfvf_map;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ u64 regbase;
- nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
- return -EINVAL;
-
- pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
-
- mutex_lock(&rvu->rsrc_lock);
-
- err = rvu_get_tl1_schqs(rvu, blkaddr,
- pcifunc, schq_list, &schq_cnt);
- if (err)
- goto unlock;
+ if (hw->cap.nix_shaping)
+ return true;
- for (idx = 0; idx < schq_cnt; idx++) {
- schq = schq_list[idx];
- map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
- map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+ /* If shaping and coloring are not supported, then
+ * *_CIR and *_PIR registers should not be configured.
+ */
+ regbase = reg & 0xFFFF;
- /* check if config is already done or this is pf */
- if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE)
- continue;
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ if (regbase == NIX_AF_TL1X_CIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ if (regbase == NIX_AF_TL2X_CIR(0) ||
+ regbase == NIX_AF_TL2X_PIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ if (regbase == NIX_AF_TL3X_CIR(0) ||
+ regbase == NIX_AF_TL3X_PIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ if (regbase == NIX_AF_TL4X_CIR(0) ||
+ regbase == NIX_AF_TL4X_PIR(0))
+ return false;
+ break;
+ }
+ return true;
+}
- /* default configuration */
- reg = NIX_AF_TL1X_TOPOLOGY(schq);
- regval = (TXSCH_TL1_DFLT_RR_PRIO << 1);
- rvu_write64(rvu, blkaddr, reg, regval);
- reg = NIX_AF_TL1X_SCHEDULE(schq);
- regval = TXSCH_TL1_DFLT_RR_QTM;
- rvu_write64(rvu, blkaddr, reg, regval);
- reg = NIX_AF_TL1X_CIR(schq);
- regval = 0;
- rvu_write64(rvu, blkaddr, reg, regval);
+static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
+ u16 pcifunc, int blkaddr)
+{
+ u32 *pfvf_map;
+ int schq;
- map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
- pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
- }
-unlock:
- mutex_unlock(&rvu->rsrc_lock);
- return err;
+ schq = nix_get_tx_link(rvu, pcifunc);
+ pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
+ /* Skip if PF has already done the config */
+ if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
+ return;
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
+ (TXSCH_TL1_DFLT_RR_PRIO << 1));
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
+ TXSCH_TL1_DFLT_RR_QTM);
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
+ pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
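
A sketch of the pfvf_map encoding these helpers manipulate: the owning
PF_FUNC in the high half, flag bits such as NIX_TXSCHQ_CFG_DONE in the
low half. Field widths here are assumptions for illustration, not the
driver's actual TXSCH_MAP macros.

    #include <stdint.h>
    #include <stdio.h>

    #define MAP(func, flags)  (((uint32_t)(func) << 16) | (flags))
    #define MAP_FUNC(m)       ((m) >> 16)
    #define MAP_FLAGS(m)      ((m) & 0xffff)
    #define SET_FLAG(m, f)    ((m) | (f))

    #define CFG_DONE 0x1                       /* illustrative flag bit */

    int main(void)
    {
            uint32_t m = MAP(0x400, 0);        /* PF owns queue, no flags */

            m = SET_FLAG(m, CFG_DONE);         /* mark default cfg done */
            printf("func=0x%x flags=0x%x\n", MAP_FUNC(m), MAP_FLAGS(m));
            return 0;
    }
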
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp)
{
- u16 schq, pcifunc = req->hdr.pcifunc;
struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
u64 reg, regval, schq_regbase;
struct nix_txsch *txsch;
- u16 map_func, map_flags;
struct nix_hw *nix_hw;
int blkaddr, idx, err;
+ int nixlf, schq;
u32 *pfvf_map;
- int nixlf;
if (req->lvl >= NIX_TXSCH_LVL_CNT ||
req->num_regs > MAX_REGS_PER_MBOX_MSG)
return NIX_AF_INVAL_TXSCHQ_CFG;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ if (err)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
@@ -1512,19 +1679,16 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
if (!nix_hw)
return -EINVAL;
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
txsch = &nix_hw->txsch[req->lvl];
pfvf_map = txsch->pfvf_map;
- /* VF is only allowed to trigger
- * setting default cfg on TL1
- */
- if (pcifunc & RVU_PFVF_FUNC_MASK &&
- req->lvl == NIX_TXSCH_LVL_TL1) {
- return nix_tl1_default_cfg(rvu, pcifunc);
+ if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
+ pcifunc & RVU_PFVF_FUNC_MASK) {
+ mutex_lock(&rvu->rsrc_lock);
+ if (req->lvl == NIX_TXSCH_LVL_TL1)
+ nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
}
for (idx = 0; idx < req->num_regs; idx++) {
@@ -1532,10 +1696,14 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
regval = req->regval[idx];
schq_regbase = reg & 0xFFFF;
- if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
- txsch->lvl, reg, regval))
+ if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
+ txsch->lvl, reg, regval))
return NIX_AF_INVAL_TXSCHQ_CFG;
+ /* Check if shaping and coloring is supported */
+ if (!is_txschq_shaping_valid(hw, req->lvl, reg))
+ continue;
+
/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
@@ -1544,32 +1712,36 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
regval |= ((u64)nixlf << 24);
}
+ /* Clear 'BP_ENA' config, if it's not allowed */
+ if (!hw->cap.nix_tx_link_bp) {
+ if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
+ (schq_regbase & 0xFF00) ==
+ NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
+ regval &= ~BIT_ULL(13);
+ }
+
/* Mark config as done for TL1 by PF */
if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
-
mutex_lock(&rvu->rsrc_lock);
-
- map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
- map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
-
- map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
- pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+ pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
+ NIX_TXSCHQ_CFG_DONE);
mutex_unlock(&rvu->rsrc_lock);
}
- rvu_write64(rvu, blkaddr, reg, regval);
-
- /* Check for SMQ flush, if so, poll for its completion */
+ /* SMQ flush is special, hence split the register write such
* that the flush happens first and the rest of the bits are written later.
+ */
if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
(regval & BIT_ULL(49))) {
- err = rvu_poll_reg(rvu, blkaddr,
- reg, BIT_ULL(49), true);
- if (err)
- return NIX_AF_SMQ_FLUSH_FAILED;
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
+ regval &= ~BIT_ULL(49);
}
+ rvu_write64(rvu, blkaddr, reg, regval);
}
+
return 0;
}
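
The SMQ special-casing at the end of the handler reduces to: if the
mailbox regval requests a flush, run the flush handshake first and write
the remaining bits with bit 49 cleared. A stand-alone sketch, where
do_smq_flush() is a stub for nix_smq_flush():

    #include <stdint.h>
    #include <stdio.h>

    #define SMQ_FLUSH (1ULL << 49)

    static void do_smq_flush(int schq)       /* stand-in for nix_smq_flush */
    {
            printf("flushing SMQ %d\n", schq);
    }

    static uint64_t write_smq_cfg(int schq, uint64_t regval)
    {
            if (regval & SMQ_FLUSH) {
                    do_smq_flush(schq);      /* handshake done separately */
                    regval &= ~SMQ_FLUSH;    /* don't re-trigger on write */
            }
            return regval;                   /* value actually written */
    }

    int main(void)
    {
            printf("regval=0x%llx\n", (unsigned long long)
                   write_smq_cfg(3, SMQ_FLUSH | 0x7f));
            return 0;
    }
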
@@ -1650,7 +1822,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
}
static int nix_update_mce_list(struct nix_mce_list *mce_list,
- u16 pcifunc, int idx, bool add)
+ u16 pcifunc, bool add)
{
struct mce *mce, *tail = NULL;
bool delete = false;
@@ -1679,7 +1851,6 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
mce = kzalloc(sizeof(*mce), GFP_KERNEL);
if (!mce)
return -ENOMEM;
- mce->idx = idx;
mce->pcifunc = pcifunc;
if (!tail)
hlist_add_head(&mce->node, &mce_list->head);
@@ -1691,12 +1862,12 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
{
- int err = 0, idx, next_idx, count;
+ int err = 0, idx, next_idx, last_idx;
struct nix_mce_list *mce_list;
- struct mce *mce, *next_mce;
struct nix_mcast *mcast;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
+ struct mce *mce;
int blkaddr;
/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
@@ -1728,31 +1899,31 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
mutex_lock(&mcast->mce_lock);
- err = nix_update_mce_list(mce_list, pcifunc, idx, add);
+ err = nix_update_mce_list(mce_list, pcifunc, add);
if (err)
goto end;
/* Disable MCAM entry in NPC */
-
- if (!mce_list->count)
+ if (!mce_list->count) {
+ rvu_npc_disable_bcast_entry(rvu, pcifunc);
goto end;
- count = mce_list->count;
+ }
/* Dump the updated list to HW */
+ idx = pfvf->bcast_mce_idx;
+ last_idx = idx + mce_list->count - 1;
hlist_for_each_entry(mce, &mce_list->head, node) {
- next_idx = 0;
- count--;
- if (count) {
- next_mce = hlist_entry(mce->node.next,
- struct mce, node);
- next_idx = next_mce->idx;
- }
+ if (idx > last_idx)
+ break;
+
+ next_idx = idx + 1;
/* EOL should be set in last MCE */
- err = nix_setup_mce(rvu, mce->idx,
- NIX_AQ_INSTOP_WRITE, mce->pcifunc,
- next_idx, count ? false : true);
+ err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
+ mce->pcifunc, next_idx,
+ (next_idx > last_idx) ? true : false);
if (err)
goto end;
+ idx++;
}
end:
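
The dump loop above lays the software list onto contiguous HW MCE indices
starting at bcast_mce_idx, chaining each entry to idx+1 and setting EOL
on the last. A minimal sketch with stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    struct mce { unsigned pcifunc; struct mce *next; };

    static void dump_mce_chain(struct mce *head, int base, int count)
    {
            int idx = base, last = base + count - 1;

            for (struct mce *m = head; m && idx <= last; m = m->next, idx++) {
                    bool eol = (idx + 1 > last);     /* EOL on last MCE */
                    printf("MCE[%d] func=0x%x next=%d eol=%d\n",
                           idx, m->pcifunc, idx + 1, eol);
            }
    }

    int main(void)
    {
            struct mce c = { 0x402, NULL }, b = { 0x401, &c },
                       a = { 0x400, &b };

            dump_mce_chain(&a, 8, 3);        /* bcast_mce_idx = 8 */
            return 0;
    }
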
@@ -1849,8 +2020,8 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
struct nix_txsch *txsch;
+ int err, lvl, schq;
u64 cfg, reg;
- int err, lvl;
/* Get scheduler queue count of each type and alloc
* bitmap for each for alloc/free/attach operations.
@@ -1888,7 +2059,8 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
sizeof(u32), GFP_KERNEL);
if (!txsch->pfvf_map)
return -ENOMEM;
- memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32));
+ for (schq = 0; schq < txsch->schq.max; schq++)
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
}
return 0;
}
@@ -2032,51 +2204,82 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
if (field_marker)
memset(&tmp, 0, sizeof(tmp));
+ field_marker = true;
+ keyoff_marker = true;
switch (key_type) {
case NIX_FLOW_KEY_TYPE_PORT:
field->sel_chan = true;
/* This should be set to 1, when SEL_CHAN is set */
field->bytesm1 = 1;
- field_marker = true;
- keyoff_marker = true;
break;
case NIX_FLOW_KEY_TYPE_IPV4:
+ case NIX_FLOW_KEY_TYPE_INNR_IPV4:
field->lid = NPC_LID_LC;
field->ltype_match = NPC_LT_LC_IP;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
+ field->lid = NPC_LID_LG;
+ field->ltype_match = NPC_LT_LG_TU_IP;
+ }
field->hdr_offset = 12; /* SIP offset */
field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
field->ltype_mask = 0xF; /* Match only IPv4 */
- field_marker = true;
keyoff_marker = false;
break;
case NIX_FLOW_KEY_TYPE_IPV6:
+ case NIX_FLOW_KEY_TYPE_INNR_IPV6:
field->lid = NPC_LID_LC;
field->ltype_match = NPC_LT_LC_IP6;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
+ field->lid = NPC_LID_LG;
+ field->ltype_match = NPC_LT_LG_TU_IP6;
+ }
field->hdr_offset = 8; /* SIP offset */
field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
field->ltype_mask = 0xF; /* Match only IPv6 */
- field_marker = true;
- keyoff_marker = true;
break;
case NIX_FLOW_KEY_TYPE_TCP:
case NIX_FLOW_KEY_TYPE_UDP:
case NIX_FLOW_KEY_TYPE_SCTP:
+ case NIX_FLOW_KEY_TYPE_INNR_TCP:
+ case NIX_FLOW_KEY_TYPE_INNR_UDP:
+ case NIX_FLOW_KEY_TYPE_INNR_SCTP:
field->lid = NPC_LID_LD;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
+ field->lid = NPC_LID_LH;
field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
- if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) {
+
+ /* Ltype enum values under NPC_LID_LD and NPC_LID_LH are the same,
+ * so no need to change the ltype_match, just change
+ * the lid for inner protocols
+ */
+ BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
+ (int)NPC_LT_LH_TU_TCP);
+ BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
+ (int)NPC_LT_LH_TU_UDP);
+ BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
+ (int)NPC_LT_LH_TU_SCTP);
+
+ if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
+ valid_key) {
field->ltype_match |= NPC_LT_LD_TCP;
group_member = true;
- } else if (key_type == NIX_FLOW_KEY_TYPE_UDP &&
+ } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
valid_key) {
field->ltype_match |= NPC_LT_LD_UDP;
group_member = true;
- } else if (key_type == NIX_FLOW_KEY_TYPE_SCTP &&
+ } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
valid_key) {
field->ltype_match |= NPC_LT_LD_SCTP;
group_member = true;
}
field->ltype_mask = ~field->ltype_match;
- if (key_type == NIX_FLOW_KEY_TYPE_SCTP) {
+ if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
/* Handle the case where any of the group item
* is enabled in the group but not the final one
*/
@@ -2084,13 +2287,73 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
valid_key = true;
group_member = false;
}
- field_marker = true;
- keyoff_marker = true;
} else {
field_marker = false;
keyoff_marker = false;
}
break;
+ case NIX_FLOW_KEY_TYPE_NVGRE:
+ field->lid = NPC_LID_LD;
+ field->hdr_offset = 4; /* VSID offset */
+ field->bytesm1 = 2;
+ field->ltype_match = NPC_LT_LD_NVGRE;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_VXLAN:
+ case NIX_FLOW_KEY_TYPE_GENEVE:
+ field->lid = NPC_LID_LE;
+ field->bytesm1 = 2;
+ field->hdr_offset = 4;
+ field->ltype_mask = 0xF;
+ field_marker = false;
+ keyoff_marker = false;
+
+ if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
+ field->ltype_match |= NPC_LT_LE_VXLAN;
+ group_member = true;
+ }
+
+ if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
+ field->ltype_match |= NPC_LT_LE_GENEVE;
+ group_member = true;
+ }
+
+ if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
+ if (group_member) {
+ field->ltype_mask = ~field->ltype_match;
+ field_marker = true;
+ keyoff_marker = true;
+ valid_key = true;
+ group_member = false;
+ }
+ }
+ break;
+ case NIX_FLOW_KEY_TYPE_ETH_DMAC:
+ case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
+ field->lid = NPC_LID_LA;
+ field->ltype_match = NPC_LT_LA_ETHER;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
+ field->lid = NPC_LID_LF;
+ field->ltype_match = NPC_LT_LF_TU_ETHER;
+ }
+ field->hdr_offset = 0;
+ field->bytesm1 = 5; /* DMAC 6 Byte */
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_IPV6_EXT:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 40; /* IPV6 hdr */
+ field->bytesm1 = 0; /* 1 byte ext hdr */
+ field->ltype_match = NPC_LT_LC_IP6_EXT;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_GTPU:
+ field->lid = NPC_LID_LE;
+ field->hdr_offset = 4;
+ field->bytesm1 = 3; /* 4 bytes TID */
+ field->ltype_match = NPC_LT_LE_GTPU;
+ field->ltype_mask = 0xF;
+ break;
}
field->ena = 1;
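
Each case above fills one extraction descriptor: parse layer (lid), layer
type match/mask, header byte offset and length-minus-one. A simplified
stand-in struct, with the ETH_DMAC numbers as the example; the lid and
ltype values are illustrative, not the NPC enum's.

    #include <stdio.h>

    struct flowkey_field {
            unsigned char lid;          /* parse layer id */
            unsigned char ltype_match;  /* layer type to match */
            unsigned char ltype_mask;   /* which ltype bits to compare */
            unsigned char hdr_offset;   /* byte offset into the header */
            unsigned char bytesm1;      /* bytes to extract, minus one */
    };

    int main(void)
    {
            /* mirrors the ETH_DMAC case: 6 DMAC bytes at offset 0 */
            struct flowkey_field dmac = {
                    .lid = 0, .ltype_match = 1,   /* LA/ETHER, illustrative */
                    .ltype_mask = 0xF, .hdr_offset = 0, .bytesm1 = 5,
            };

            printf("extract %d bytes at +%d\n",
                   dmac.bytesm1 + 1, dmac.hdr_offset);
            return 0;
    }
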
@@ -2449,8 +2712,6 @@ linkcfg:
cfg &= ~(0xFFFFFULL << 12);
cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
- rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg);
-
return 0;
}
@@ -2591,9 +2852,6 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
rvu_write64(rvu, blkaddr,
NIX_AF_TX_LINKX_NORM_CREDIT(link),
tx_credits);
- rvu_write64(rvu, blkaddr,
- NIX_AF_TX_LINKX_EXPR_CREDIT(link),
- tx_credits);
}
}
@@ -2605,8 +2863,6 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
rvu_write64(rvu, blkaddr,
NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
- rvu_write64(rvu, blkaddr,
- NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits);
}
}
@@ -2674,6 +2930,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
/* Do not bypass NDC cache */
cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
cfg &= ~0x3FFEULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+ /* Disable caching of SQB aka SQEs */
+ cfg |= 0x04ULL;
+#endif
rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
/* Result structure can be followed by RQ/SQ/CQ context at
@@ -2704,13 +2964,25 @@ int rvu_nix_init(struct rvu *rvu)
return 0;
block = &hw->block[blkaddr];
- /* As per a HW errata in 9xxx A0 silicon, NIX may corrupt
- * internal state when conditional clocks are turned off.
- * Hence enable them.
- */
- if (is_rvu_9xxx_A0(rvu))
+ if (is_rvu_96xx_B0(rvu)) {
+ /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
+ * internal state when conditional clocks are turned off.
+ * Hence enable them.
+ */
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
- rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+
+ /* Disable SQ manager's sticky mode operation (set TM6 = 0)
+ * This sticky mode is known to cause SQ stalls when multiple
+ * SQs are mapped to the same SMQ and transmit pkts at the same time.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~BIT_ULL(15);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+ }
/* Calibrate X2P bus to check if CGX/LBK links are fine */
err = nix_calibrate_x2p(rvu, blkaddr);
@@ -2763,23 +3035,23 @@ int rvu_nix_init(struct rvu *rvu)
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
(NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
- (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
(NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
- (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F);
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP6 << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
(NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F);
+ (NPC_LID_LH << 8) | (NPC_LT_LH_TU_TCP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
(NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F);
+ (NPC_LID_LH << 8) | (NPC_LT_LH_TU_UDP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
(NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) |
+ (NPC_LID_LH << 8) | (NPC_LT_LH_TU_SCTP << 4) |
0x0F);
err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
@@ -2825,7 +3097,7 @@ void rvu_nix_freemem(struct rvu *rvu)
}
}
-static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct rvu_hwinfo *hw = rvu->hw;
@@ -2853,7 +3125,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
return err;
rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
- return 0;
+
+ return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
@@ -2867,7 +3140,8 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
return err;
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
- return 0;
+
+ return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
@@ -2883,6 +3157,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
nix_rx_sync(rvu, blkaddr);
nix_txschq_free(rvu, pcifunc);
+ rvu_cgx_start_stop_io(rvu, pcifunc, false);
+
if (pfvf->sq_ctx) {
ctx_req.ctype = NIX_AQ_CTYPE_SQ;
err = nix_lf_hwctx_disable(rvu, &ctx_req);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index c0e165dfc403..6e7c7f459f74 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -52,8 +52,8 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return 0;
}
-static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
- struct npa_aq_enq_rsp *rsp)
+int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
@@ -241,12 +241,50 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
return err;
}
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
+{
+ struct npa_aq_enq_req lock_ctx_req;
+ int err;
+
+ if (req->op != NPA_AQ_INSTOP_INIT)
+ return 0;
+
+ memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
+ lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+ lock_ctx_req.ctype = req->ctype;
+ lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
+ lock_ctx_req.aura_id = req->aura_id;
+ err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+ if (err)
+ dev_err(rvu->dev,
+ "PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
+ req->hdr.pcifunc,
+ (req->ctype == NPA_AQ_CTYPE_AURA) ?
+ "Aura" : "Pool", req->aura_id);
+ return err;
+}
+
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
+ struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ int err;
+
+ err = rvu_npa_aq_enq_inst(rvu, req, rsp);
+ if (!err)
+ err = npa_lf_hwctx_lockdown(rvu, req);
+ return err;
+}
+#else
+
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
+#endif
int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -289,6 +327,9 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
return NPA_AF_ERR_PARAM;
+ if (req->way_mask)
+ req->way_mask &= 0xFFFF;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
if (!pfvf->npalf || blkaddr < 0)
@@ -345,7 +386,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
/* Clear way partition mask and set aura offset to '0' */
cfg &= ~(BIT_ULL(34) - 1);
/* Set aura size & enable caching of contexts */
- cfg |= (req->aura_sz << 16) | BIT_ULL(34);
+ cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;
+
rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
/* Configure aura HW context's base */
@@ -353,7 +395,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
(u64)pfvf->aura_ctx->iova);
/* Enable caching of qints hw context */
- rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
+ BIT_ULL(36) | req->way_mask << 20);
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
(u64)pfvf->npa_qints_ctx->iova);
@@ -422,6 +465,10 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
/* Do not bypass NDC cache */
cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
cfg &= ~0x03DULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+ /* Disable caching of stack pages */
+ cfg |= 0x10ULL;
+#endif
rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
/* Result structure can be followed by Aura/Pool context at
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 15f70273e29c..40e431debbe9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -120,6 +120,31 @@ static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
}
}
+static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+ int actbank = bank;
+
+ index &= (mcam->banksize - 1);
+ for (; bank < (actbank + mcam->banks_per_entry); bank++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0);
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0);
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0);
+ }
+}
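
npc_clear_mcam_entry() walks the banks a wide entry spans; the row within
each bank is the global index modulo the bank size. A sketch with made-up
geometry, assuming a power-of-two banksize as the masking implies:

    #include <stdio.h>

    enum { BANKSIZE = 256, BANKS_PER_ENTRY = 2 };

    int main(void)
    {
            int index = 700;
            int bank = index / BANKSIZE;       /* npc_get_bank() equivalent */
            int row = index & (BANKSIZE - 1);

            for (int b = bank; b < bank + BANKS_PER_ENTRY; b++)
                    printf("clear bank %d row %d\n", b, row);
            return 0;
    }
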
+
static void npc_get_keyword(struct mcam_entry *entry, int idx,
u64 *cam0, u64 *cam1)
{
@@ -211,6 +236,12 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
actindex = index;
index &= (mcam->banksize - 1);
+ /* Disable before mcam entry update */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
+
+ /* Clear mcam entry to avoid writes being suppressed by NPC */
+ npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex);
+
/* CAM1 takes the comparison value and
* CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
* CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
@@ -251,8 +282,6 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
/* Enable the entry */
if (enable)
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
- else
- npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
}
static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
@@ -354,8 +383,8 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
NIX_INTF_RX, &entry, true);
/* add VLAN matching, setup action and save entry back for later */
- entry.kw[0] |= (NPC_LT_LB_STAG | NPC_LT_LB_CTAG) << 20;
- entry.kw_mask[0] |= (NPC_LT_LB_STAG & NPC_LT_LB_CTAG) << 20;
+ entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20;
+ entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20;
entry.vtag_action = VTAG0_VALID_BIT |
FIELD_PREP(VTAG0_TYPE_MASK, 0) |
@@ -448,68 +477,75 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct mcam_entry entry = { {0} };
+ struct rvu_hwinfo *hw = rvu->hw;
struct nix_rx_action action;
-#ifdef MCAST_MCE
struct rvu_pfvf *pfvf;
-#endif
int blkaddr, index;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
- /* Only PF can add a bcast match entry */
- if (pcifunc & RVU_PFVF_FUNC_MASK)
+ /* Skip LBK VFs */
+ if (is_afvf(pcifunc))
return;
-#ifdef MCAST_MCE
- pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
-#endif
+ /* If pkt replication is not supported,
+ * then only PF is allowed to add a bcast match entry.
+ */
+ if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
- /* Check for L2B bit and LMAC channel
- * NOTE: Since MKEX default profile(a reduced version intended to
- * accommodate more capability but igoring few bits) a stap-gap
- * approach.
- * Since we care for L2B which by HRM NPC_PARSE_KEX_S at BIT_POS[25], So
- * moved to BIT_POS[13], ignoring ERRCODE, ERRLEV as we'll loose out
- * on capability features needed for CoS (/from ODP PoV) e.g: VLAN,
- * DSCP.
- *
- * Reduced layout of MKEX default profile -
- * Includes following are (i.e.CHAN, L2/3{B/M}, LA, LB, LC, LD):
- *
- * BIT_POS[31:28] : LD
- * BIT_POS[27:24] : LC
- * BIT_POS[23:20] : LB
- * BIT_POS[19:16] : LA
- * BIT_POS[15:12] : L3B, L3M, L2B, L2M
- * BIT_POS[11:00] : CHAN
- *
+ /* Match ingress channel */
+ entry.kw[0] = chan;
+ entry.kw_mask[0] = 0xfffull;
+
+ /* Match broadcast MAC address.
+ * DMAC is extracted at 0th bit of PARSE_KEX::KW1
*/
- entry.kw[0] = BIT_ULL(13) | chan;
- entry.kw_mask[0] = BIT_ULL(13) | 0xFFFULL;
+ entry.kw[1] = 0xffffffffffffull;
+ entry.kw_mask[1] = 0xffffffffffffull;
*(u64 *)&action = 0x00;
-#ifdef MCAST_MCE
- /* Early silicon doesn't support pkt replication,
- * so install entry with UCAST action, so that PF
- * receives all broadcast packets.
- */
- action.op = NIX_RX_ACTIONOP_MCAST;
- action.pf_func = pcifunc;
- action.index = pfvf->bcast_mce_idx;
-#else
- action.op = NIX_RX_ACTIONOP_UCAST;
- action.pf_func = pcifunc;
-#endif
+ if (!hw->cap.nix_rx_multicast) {
+ /* Early silicon doesn't support pkt replication,
+ * so install entry with UCAST action, so that PF
+ * receives all broadcast packets.
+ */
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ } else {
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ action.index = pfvf->bcast_mce_idx;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ }
entry.action = *(u64 *)&action;
npc_config_mcam_entry(rvu, mcam, blkaddr, index,
NIX_INTF_RX, &entry, true);
}
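
The new match key above compares only the 12-bit ingress channel in KW0
and the all-ones DMAC in KW1. A sketch of the key/mask construction, with
an illustrative channel value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t kw[2], kw_mask[2];
            uint16_t chan = 0x800;              /* example channel */

            kw[0] = chan;                       /* match ingress channel */
            kw_mask[0] = 0xfffULL;              /* low 12 bits only */
            kw[1] = 0xffffffffffffULL;          /* ff:ff:ff:ff:ff:ff DMAC */
            kw_mask[1] = 0xffffffffffffULL;

            printf("kw0=0x%llx/0x%llx kw1=0x%llx\n",
                   (unsigned long long)kw[0],
                   (unsigned long long)kw_mask[0],
                   (unsigned long long)kw[1]);
            return 0;
    }
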
+void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+}
+
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
@@ -704,8 +740,7 @@ static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
/* Layer B: Stacked VLAN (STAG|QinQ) */
/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG, 0, cfg);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_QINQ, 0, cfg);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, cfg);
/* Layer C: IPv4 */
/* SIP+DIP: 8 bytes, KW2[63:0] */
@@ -806,11 +841,11 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
/* Compare with mkex mod_param name string */
if (mcam_kex->mkex_sign == MKEX_SIGN &&
!strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
- /* Due to an errata (35786) in A0 pass silicon,
+ /* Due to an errata (35786) in A0/B0 pass silicon,
* parse nibble enable configuration has to be
* identical for both Rx and Tx interfaces.
*/
- if (is_rvu_9xxx_A0(rvu) &&
+ if (is_rvu_96xx_B0(rvu) &&
mcam_kex->keyx_cfg[NIX_INTF_RX] !=
mcam_kex->keyx_cfg[NIX_INTF_TX])
goto load_default;
@@ -1064,6 +1099,13 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->hprio_count = mcam->lprio_count;
mcam->hprio_end = mcam->hprio_count;
+ /* Reserve the last counter for the MCAM RX miss action, which is
+ * set to drop the pkt. This way we will know how many pkts didn't match
+ * any MCAM entry.
+ */
+ mcam->counters.max--;
+ mcam->rx_miss_act_cntr = mcam->counters.max;
+
/* Allocate bitmap for managing MCAM counters and memory
* for saving counter to RVU PFFUNC allocation mapping.
*/
@@ -1101,6 +1143,7 @@ free_mem:
int rvu_npc_init(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
u64 keyz = NPC_MCAM_KEY_X2;
int blkaddr, entry, bank, err;
u64 cfg, nibble_ena;
@@ -1143,7 +1186,7 @@ int rvu_npc_init(struct rvu *rvu)
/* Config Inner IPV4 NPC layer info */
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
- (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
/* Enable below for Rx pkts.
* - Outer IPv4 header checksum validation.
@@ -1165,7 +1208,7 @@ int rvu_npc_init(struct rvu *rvu)
/* Due to an errata (35786) in A0 pass silicon, parse nibble enable
* configuration has to be identical for both Rx and Tx interfaces.
*/
- if (!is_rvu_9xxx_A0(rvu))
+ if (!is_rvu_96xx_B0(rvu))
nibble_ena = (1ULL << 19) - 1;
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
((keyz & 0x3) << 32) | nibble_ena);
@@ -1183,9 +1226,13 @@ int rvu_npc_init(struct rvu *rvu)
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
NIX_TX_ACTIONOP_UCAST_DEFAULT);
- /* If MCAM lookup doesn't result in a match, drop the received packet */
+ /* If MCAM lookup doesn't result in a match, drop the received packet.
+ * And map this action to a counter to count dropped pkts.
+ */
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
NIX_RX_ACTIONOP_DROP);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX),
+ BIT_ULL(9) | mcam->rx_miss_act_cntr);
return 0;
}
@@ -1200,6 +1247,44 @@ void rvu_npc_freemem(struct rvu *rvu)
mutex_destroy(&mcam->lock);
}
+void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int entry;
+
+ *alloc_cnt = 0;
+ *enable_cnt = 0;
+
+ for (entry = 0; entry < mcam->bmap_entries; entry++) {
+ if (mcam->entry2pfvf_map[entry] == pcifunc) {
+ (*alloc_cnt)++;
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry))
+ (*enable_cnt)++;
+ }
+ }
+}
+
+void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int cntr;
+
+ *alloc_cnt = 0;
+ *enable_cnt = 0;
+
+ for (cntr = 0; cntr < mcam->counters.max; cntr++) {
+ if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
+ (*alloc_cnt)++;
+ if (mcam->cntr_refcnt[cntr])
+ (*enable_cnt)++;
+ }
+ }
+}
+
static int npc_mcam_verify_entry(struct npc_mcam *mcam,
u16 pcifunc, int entry)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 09a8d61f3144..7ca599b973c0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -246,6 +246,7 @@
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_SQM_DBG_CTL_STATUS (0x750)
#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
#define NIX_AF_PSE_SHAPER_CFG (0x810)
#define NIX_AF_TX_EXPR_CREDIT (0x830)
@@ -435,7 +436,6 @@
#define CPT_AF_LF_RST (0x44000)
#define CPT_AF_BLK_RST (0x46000)
-#define NDC_AF_BLK_RST (0x002F0)
#define NPC_AF_BLK_RST (0x00040)
/* NPC */
@@ -499,4 +499,30 @@
#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+/* NDC */
+#define NDC_AF_CONST (0x00000)
+#define NDC_AF_CLK_EN (0x00020)
+#define NDC_AF_CTL (0x00030)
+#define NDC_AF_BANK_CTL (0x00040)
+#define NDC_AF_BANK_CTL_DONE (0x00048)
+#define NDC_AF_INTR (0x00058)
+#define NDC_AF_INTR_W1S (0x00060)
+#define NDC_AF_INTR_ENA_W1S (0x00068)
+#define NDC_AF_INTR_ENA_W1C (0x00070)
+#define NDC_AF_ACTIVE_PC (0x00078)
+#define NDC_AF_BP_TEST_ENABLE (0x001F8)
+#define NDC_AF_BP_TEST(a) (0x00200 | (a) << 3)
+#define NDC_AF_BLK_RST (0x002F0)
+#define NDC_PRIV_AF_INT_CFG (0x002F8)
+#define NDC_AF_HASHX(a) (0x00300 | (a) << 3)
+#define NDC_AF_PORTX_RTX_RWX_REQ_PC(a, b, c) \
+ (0x00C00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_RWX_OSTDN_PC(a, b, c) \
+ (0x00D00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_RWX_LAT_PC(a, b, c) \
+ (0x00E00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_CANT_ALLOC_PC(a, b) \
+ (0x00F00 | (a) << 5 | (b) << 4)
+#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
+#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
#endif /* RVU_REG_H */
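
The parameterised NDC macros above compose a register offset from shifted
selector fields. A tiny check using the REQ_PC encoding as written; what
each argument selects (port, read/write, hit/miss) is an assumption here.

    #include <stdio.h>

    #define NDC_AF_PORTX_RTX_RWX_REQ_PC(a, b, c) \
            (0x00C00 | (a) << 5 | (b) << 4 | (c) << 3)

    int main(void)
    {
            /* port 1, selector fields b=0, c=1 (labels are assumptions) */
            printf("offset = 0x%x\n", NDC_AF_PORTX_RTX_RWX_REQ_PC(1, 0, 1));
            return 0;
    }
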
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index f920dac74e6c..9d8942acc232 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -13,22 +13,22 @@
/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
- BLKADDR_RVUM = 0x0ULL,
- BLKADDR_LMT = 0x1ULL,
- BLKADDR_MSIX = 0x2ULL,
- BLKADDR_NPA = 0x3ULL,
- BLKADDR_NIX0 = 0x4ULL,
- BLKADDR_NIX1 = 0x5ULL,
- BLKADDR_NPC = 0x6ULL,
- BLKADDR_SSO = 0x7ULL,
- BLKADDR_SSOW = 0x8ULL,
- BLKADDR_TIM = 0x9ULL,
- BLKADDR_CPT0 = 0xaULL,
- BLKADDR_CPT1 = 0xbULL,
- BLKADDR_NDC0 = 0xcULL,
- BLKADDR_NDC1 = 0xdULL,
- BLKADDR_NDC2 = 0xeULL,
- BLK_COUNT = 0xfULL,
+ BLKADDR_RVUM = 0x0ULL,
+ BLKADDR_LMT = 0x1ULL,
+ BLKADDR_MSIX = 0x2ULL,
+ BLKADDR_NPA = 0x3ULL,
+ BLKADDR_NIX0 = 0x4ULL,
+ BLKADDR_NIX1 = 0x5ULL,
+ BLKADDR_NPC = 0x6ULL,
+ BLKADDR_SSO = 0x7ULL,
+ BLKADDR_SSOW = 0x8ULL,
+ BLKADDR_TIM = 0x9ULL,
+ BLKADDR_CPT0 = 0xaULL,
+ BLKADDR_CPT1 = 0xbULL,
+ BLKADDR_NDC_NIX0_RX = 0xcULL,
+ BLKADDR_NDC_NIX0_TX = 0xdULL,
+ BLKADDR_NDC_NPA0 = 0xeULL,
+ BLK_COUNT = 0xfULL,
};
/* RVU Block Type Enumeration */
@@ -474,9 +474,9 @@ struct nix_cq_ctx_s {
u64 ena : 1;
u64 drop_ena : 1;
u64 drop : 8;
- u64 dp : 8;
+ u64 bp : 8;
#else
- u64 dp : 8;
+ u64 bp : 8;
u64 drop : 8;
u64 drop_ena : 1;
u64 ena : 1;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 51b77c2de400..3fb7ee3d4d13 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1489,8 +1489,10 @@ static int pxa168_eth_probe(struct platform_device *pdev)
goto err_netdev;
}
of_property_read_u32(np, "reg", &pep->phy_addr);
- pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
of_node_put(np);
+ err = of_get_phy_mode(pdev->dev.of_node, &pep->phy_intf);
+ if (err && err != -ENODEV)
+ goto err_netdev;
}
/* Hardware supports only 3 ports */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 703adb96429e..1923ba76a1ec 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2180,6 +2180,31 @@ static int mtk_start_dma(struct mtk_eth *eth)
return 0;
}
+static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
+{
+ int i;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ return;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+
+ /* by default, set up the forward port to send frames to PDMA */
+ val &= ~0xffff;
+
+ /* Enable RX checksum */
+ val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
+
+ val |= config;
+
+ mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
+ }
+ /* Reset and enable PSE */
+ mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+ mtk_w32(eth, 0, MTK_RST_GL);
+}
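
mtk_gdm_config() is a per-MAC read-modify-write: clear the low 16
forward-port bits, keep RX checksum offload enabled, then OR in either
MTK_GDMA_TO_PDMA or MTK_GDMA_DROP_ALL. A sketch of just that word
manipulation, with register access stubbed out:

    #include <stdint.h>
    #include <stdio.h>

    #define ICS_EN   (1u << 22)
    #define TCS_EN   (1u << 21)
    #define UCS_EN   (1u << 20)
    #define DROP_ALL 0x7777u                 /* MTK_GDMA_DROP_ALL */

    static uint32_t gdm_fwd_cfg(uint32_t old, uint32_t config)
    {
            old &= ~0xffffu;                 /* clear forward-port field */
            old |= ICS_EN | TCS_EN | UCS_EN; /* keep RX checksum enabled */
            return old | config;             /* PDMA (0) or drop-all */
    }

    int main(void)
    {
            printf("stop cfg = 0x%08x\n", gdm_fwd_cfg(0x1234, DROP_ALL));
            return 0;
    }
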
+
static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -2200,6 +2225,8 @@ static int mtk_open(struct net_device *dev)
if (err)
return err;
+ mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
+
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
@@ -2252,6 +2279,8 @@ static int mtk_stop(struct net_device *dev)
if (!refcount_dec_and_test(&eth->dma_refcnt))
return 0;
+ mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
napi_disable(&eth->tx_napi);
@@ -2375,8 +2404,6 @@ static int mtk_hw_init(struct mtk_eth *eth)
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
- mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
- mtk_w32(eth, 0, MTK_RST_GL);
/* FE int grouping */
mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
@@ -2385,19 +2412,6 @@ static int mtk_hw_init(struct mtk_eth *eth)
mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
-
- /* setup the forward port to send frame to PDMA */
- val &= ~0xffff;
-
- /* Enable RX checksum */
- val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
-
- /* setup the mac dma */
- mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
- }
-
return 0;
err_disable_pm:
@@ -2758,9 +2772,10 @@ static const struct net_device_ops mtk_netdev_ops = {
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
const __be32 *_id = of_get_property(np, "reg", NULL);
+ phy_interface_t phy_mode;
struct phylink *phylink;
- int phy_mode, id, err;
struct mtk_mac *mac;
+ int id, err;
if (!_id) {
dev_err(eth->dev, "missing mac id\n");
@@ -2805,10 +2820,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
/* phylink create */
- phy_mode = of_get_phy_mode(np);
- if (phy_mode < 0) {
+ err = of_get_phy_mode(np, &phy_mode);
+ if (err) {
dev_err(eth->dev, "incorrect phy-mode\n");
- err = -EINVAL;
goto free_netdev;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 76bd12cb8150..85830fe14a1b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -84,6 +84,8 @@
#define MTK_GDMA_ICS_EN BIT(22)
#define MTK_GDMA_TCS_EN BIT(21)
#define MTK_GDMA_UCS_EN BIT(20)
+#define MTK_GDMA_TO_PDMA 0x0
+#define MTK_GDMA_DROP_ALL 0x7777
/* Unicast Filter MAC Address Register - Low */
#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000))
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 22c72fb7206a..5716c3d2bb86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -514,8 +514,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
/*
* Subtract 1 from the limit because we need to allocate a
- * spare CQE so the HCA HW can tell the difference between an
- * empty CQ and a full CQ.
+ * spare CQE to enable resizing the CQ.
*/
dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
dev->caps.reserved_cqs = dev_cap->reserved_cqs;
@@ -4015,6 +4014,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_params_unregister;
devlink_params_publish(devlink);
+ devlink_reload_enable(devlink);
pci_save_state(pdev);
return 0;
@@ -4126,6 +4126,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(priv);
int active_vfs = 0;
+ devlink_reload_disable(devlink);
+
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 381925c90d94..ac108f1e5bd6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -85,6 +85,22 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
return 0;
}
+static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ return mlx5_unload_one(dev, false);
+}
+
+static int mlx5_devlink_reload_up(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ return mlx5_load_one(dev, false);
+}
+
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
@@ -96,6 +112,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
#endif
.flash_update = mlx5_devlink_flash_update,
.info_get = mlx5_devlink_info_get,
+ .reload_down = mlx5_devlink_reload_down,
+ .reload_up = mlx5_devlink_reload_up,
};
struct devlink *mlx5_devlink_alloc(void)
@@ -177,12 +195,29 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
};
+static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ bool new_state = val.vbool;
+
+ if (new_state && !MLX5_CAP_GEN(dev, roce)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
"flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
mlx5_devlink_fs_mode_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_enable_roce_validate),
};
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -197,6 +232,11 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
value);
+
+ value.vbool = MLX5_CAP_GEN(dev, roce);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ value);
}
int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
@@ -213,6 +253,7 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
goto params_reg_err;
mlx5_devlink_set_params_init_values(devlink);
devlink_params_publish(devlink);
+ devlink_reload_enable(devlink);
return 0;
params_reg_err:
@@ -222,6 +263,7 @@ params_reg_err:
void mlx5_devlink_unregister(struct devlink *devlink)
{
+ devlink_reload_disable(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
devlink_unregister(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 13af72556987..5316cedd78bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -77,8 +77,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
struct neighbour **out_n,
u8 *out_ttl)
{
+ struct neighbour *n;
struct rtable *rt;
- struct neighbour *n = NULL;
#if IS_ENABLED(CONFIG_INET)
struct mlx5_core_dev *mdev = priv->mdev;
@@ -138,8 +138,8 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct neighbour **out_n,
u8 *out_ttl)
{
- struct neighbour *n = NULL;
struct dst_entry *dst;
+ struct neighbour *n;
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
int ret;
@@ -212,8 +212,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct net_device *out_dev, *route_dev;
- struct neighbour *n = NULL;
struct flowi4 fl4 = {};
+ struct neighbour *n;
int ipv4_encap_size;
char *encap_header;
u8 nud_state, ttl;
@@ -239,12 +239,15 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
if (max_encap_size < ipv4_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
ipv4_encap_size, max_encap_size);
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto out;
}
encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
- if (!encap_header)
- return -ENOMEM;
+ if (!encap_header) {
+ err = -ENOMEM;
+ goto out;
+ }
/* used by mlx5e_detach_encap to lookup a neigh hash table
* entry in the neigh hash table when a user deletes a rule
@@ -328,9 +331,9 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct net_device *out_dev, *route_dev;
- struct neighbour *n = NULL;
struct flowi6 fl6 = {};
struct ipv6hdr *ip6h;
+ struct neighbour *n;
int ipv6_encap_size;
char *encap_header;
u8 nud_state, ttl;
@@ -355,12 +358,15 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
if (max_encap_size < ipv6_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
ipv6_encap_size, max_encap_size);
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto out;
}
encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
- if (!encap_header)
- return -ENOMEM;
+ if (!encap_header) {
+ err = -ENOMEM;
+ goto out;
+ }
/* used by mlx5e_detach_encap to lookup a neigh hash table
* entry in the neigh hash table when a user deletes a rule
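
Both tc_tun.c header builders replace their early returns with `goto out` so the route/neighbour reference taken at the top of the function is released on every failure path, not only the ones that previously reached the label. The pattern, reduced to a standalone sketch with stand-in names and userspace allocators:

	#include <errno.h>
	#include <stdlib.h>

	struct route;
	struct route *route_lookup(void);	/* stand-in declarations */
	void route_release(struct route *rt);

	static int build_encap_header(size_t encap_size, size_t max_encap_size)
	{
		struct route *rt;
		char *hdr = NULL;
		int err = 0;

		rt = route_lookup();		/* resource held from here on */
		if (!rt)
			return -ENOMEM;		/* nothing to unwind yet */

		if (encap_size > max_encap_size) {
			err = -EOPNOTSUPP;	/* was: a bare early return */
			goto out;
		}
		hdr = calloc(1, encap_size);
		if (!hdr) {
			err = -ENOMEM;
			goto out;
		}
		/* ... serialize the header into the device-owned buffer ... */
	out:
		free(hdr);		/* scratch buffer, freed on all paths */
		route_release(rt);	/* single release point for the lookup */
		return err;
	}
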
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1d4a66fb466a..e8d799c0dfda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -63,6 +63,7 @@
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en/hv_vhca_stats.h"
+#include "lib/mlx5.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
@@ -5419,6 +5420,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
return NULL;
}
+ dev_net_set(netdev, mlx5_core_net(mdev));
priv = netdev_priv(netdev);
err = mlx5e_attach(mdev, priv);
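
The single functional line added to mlx5e_add() pins the new netdev to the namespace of the underlying core device before registration, using the mlx5_core_net() helper pulled in by the new "lib/mlx5.h" include. Roughly, with illustrative queue counts:

	netdev = alloc_etherdev_mqs(sizeof(*priv), txqs, rxqs);
	if (!netdev)
		return NULL;
	/* Must precede registration: the netdev is created in, and torn
	 * down with, the devlink instance's network namespace. */
	dev_net_set(netdev, mlx5_core_net(mdev));
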
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index cd9bb7c7b341..f175cb24bb67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -47,6 +47,7 @@
#include "en/tc_tun.h"
#include "fs_core.h"
#include "lib/port_tun.h"
+#include "lib/mlx5.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
@@ -1243,21 +1244,60 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
}
}
-static LIST_HEAD(mlx5e_rep_block_cb_list);
+static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *f = type_data;
+ struct flow_cls_offload cls_flower;
+ struct mlx5e_priv *priv = cb_priv;
+ struct mlx5_eswitch *esw;
+ unsigned long flags;
+ int err;
+
+ flags = MLX5_TC_FLAG(INGRESS) |
+ MLX5_TC_FLAG(ESW_OFFLOAD) |
+ MLX5_TC_FLAG(FT_OFFLOAD);
+ esw = priv->mdev->priv.eswitch;
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ if (!mlx5_eswitch_prios_supported(esw) || f->common.chain_index)
+ return -EOPNOTSUPP;
+
+ /* Re-use tc offload path by moving the ft flow to the
+ * reserved ft chain.
+ */
+ memcpy(&cls_flower, f, sizeof(*f));
+ cls_flower.common.chain_index = FDB_FT_CHAIN;
+ err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags);
+ memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats));
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
+static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct flow_block_offload *f = type_data;
+ f->unlocked_driver_cb = true;
+
switch (type) {
case TC_SETUP_BLOCK:
- f->unlocked_driver_cb = true;
return flow_block_cb_setup_simple(type_data,
- &mlx5e_rep_block_cb_list,
+ &mlx5e_rep_block_tc_cb_list,
mlx5e_rep_setup_tc_cb,
priv, priv, true);
+ case TC_SETUP_FT:
+ return flow_block_cb_setup_simple(type_data,
+ &mlx5e_rep_block_ft_cb_list,
+ mlx5e_rep_setup_ft_cb,
+ priv, priv, true);
default:
return -EOPNOTSUPP;
}
@@ -1877,6 +1917,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
return -EINVAL;
}
+ dev_net_set(netdev, mlx5_core_net(dev));
rpriv->netdev = netdev;
rep->rep_data[REP_ETH].priv = rpriv;
INIT_LIST_HEAD(&rpriv->vport_sqs_list);
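
mlx5e_rep_setup_ft_cb() reuses the existing flower offload path by cloning the FT request, retargeting the clone at the reserved chain, and copying only the stats back so the caller's counters stay live; the split into _tc_ and _ft_ callback lists keeps flow_block_cb_setup_simple() from treating a second bind of the same priv as a duplicate. The core of that round trip, restated outside the switch:

	struct flow_cls_offload cls_flower;

	memcpy(&cls_flower, f, sizeof(*f));		/* clone the FT request */
	cls_flower.common.chain_index = FDB_FT_CHAIN;	/* outside the TC range */
	err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags);
	memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats));
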
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 82cffb3a9964..9e9960146e5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1386,6 +1386,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
+ if (rq->page_pool)
+ page_pool_nid_changed(rq->page_pool, numa_mem_id());
+
if (rq->cqd.left) {
work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
if (rq->cqd.left || work_done >= budget)
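
The en_rx.c hunk lets the RQ's page pool follow its NAPI context across NUMA nodes; page_pool_nid_changed() is a no-op when the node is unchanged, so calling it once per poll is cheap. Where such a call sits in a generic poll handler (struct and function names are illustrative):

	struct example_rq {
		struct napi_struct napi;
		struct page_pool *page_pool;
	};

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct example_rq *rq = container_of(napi, struct example_rq, napi);

		/* Re-home recycled buffers if this context migrated nodes */
		if (rq->page_pool)
			page_pool_nid_changed(rq->page_pool, numa_mem_id());

		/* ... process up to budget completions ... */
		return 0;
	}
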
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index bb970b2ebf8a..3a707d788022 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -74,6 +74,7 @@ enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
@@ -276,6 +277,11 @@ static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
return flow_flag_test(flow, ESWITCH);
}
+static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
+{
+ return flow_flag_test(flow, FT);
+}
+
static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, OFFLOADED);
@@ -1074,7 +1080,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->split_count = 0;
- slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
+ slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
@@ -1091,7 +1097,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->split_count = 0;
- slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
+ slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow_flag_clear(flow, SLOW);
}
@@ -1168,7 +1174,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (attr->chain > max_chain) {
+ /* We check chain range only for tc flows.
+ * For ft flows, we checked attr->chain was originally 0 and set it to
+ * FDB_FT_CHAIN which is outside tc range.
+ * See mlx5e_rep_setup_ft_cb().
+ */
+ if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
return -EOPNOTSUPP;
}
@@ -3217,6 +3228,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
const struct ip_tunnel_info *info = NULL;
+ bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
bool encap = false;
u32 action = 0;
@@ -3261,6 +3273,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EINVAL;
}
+ if (ft_flow && out_dev == priv->netdev) {
+ /* Ignore forward to self rules generated
+ * by adding both mlx5 devs to the flow table
+ * block on a normal nft offload setup.
+ */
+ return -EOPNOTSUPP;
+ }
+
if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
"can't support more output ports, can't offload forwarding");
@@ -3385,6 +3405,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
u32 dest_chain = act->chain_index;
u32 max_chain = mlx5_eswitch_get_chain_range(esw);
+ if (ft_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
+ return -EOPNOTSUPP;
+ }
if (dest_chain <= attr->chain) {
NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
return -EOPNOTSUPP;
@@ -3475,6 +3499,8 @@ static void get_flags(int flags, unsigned long *flow_flags)
__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
+ if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
*flow_flags = __flow_flags;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 924c6ef86a14..262cdb7b69b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -44,7 +44,8 @@ enum {
MLX5E_TC_FLAG_EGRESS_BIT,
MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
- MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
+ MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
};
#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
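
Because the new FT bit is added before LAST_EXPORTED_BIT, get_flags() and every MLX5_TC_FLAG() user pick it up with no further plumbing. A standalone illustration of the bit/mask scheme (names and values hypothetical; compiles as-is):

	#include <stdio.h>

	#define BIT(n) (1UL << (n))

	enum {
		INGRESS_BIT,
		EGRESS_BIT,
		NIC_OFFLOAD_BIT,
		ESW_OFFLOAD_BIT,
		FT_OFFLOAD_BIT,			/* the new bit */
		LAST_EXPORTED_BIT = FT_OFFLOAD_BIT,
	};

	#define TC_FLAG(f) BIT(f##_BIT)

	int main(void)
	{
		unsigned long flags = TC_FLAG(INGRESS) | TC_FLAG(FT_OFFLOAD);

		printf("FT offload requested: %d\n",
		       !!(flags & TC_FLAG(FT_OFFLOAD)));
		return 0;
	}
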
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 89a2806eceb8..f2e400a23a59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -111,42 +111,32 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
}
/* E-Switch vport context HW commands */
-static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
- void *in, int inlen)
+int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+ bool other_vport,
+ void *in, int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
-int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
- void *in, int inlen)
-{
- return modify_esw_vport_context_cmd(esw->dev, vport, in, inlen);
-}
-
-static int query_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
- void *out, int outlen)
+int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+ bool other_vport,
+ void *out, int outlen)
{
u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
MLX5_SET(query_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
-int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
- void *out, int outlen)
-{
- return query_esw_vport_context_cmd(esw->dev, vport, out, outlen);
-}
-
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
u16 vlan, u8 qos, u8 set_flags)
{
@@ -179,7 +169,8 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(modify_esw_vport_context_in, in,
field_select.vport_cvlan_insert, 1);
- return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
+ return mlx5_eswitch_modify_esw_vport_context(dev, vport, true,
+ in, sizeof(in));
}
/* E-Switch FDB */
@@ -452,6 +443,13 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
return err;
}
+static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
+{
+ esw_cleanup_vepa_rules(esw);
+ esw_destroy_legacy_fdb_table(esw);
+ esw_destroy_legacy_vepa_table(esw);
+}
+
#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
MLX5_VPORT_MC_ADDR_CHANGE | \
MLX5_VPORT_PROMISC_CHANGE)
@@ -464,15 +462,10 @@ static int esw_legacy_enable(struct mlx5_eswitch *esw)
if (ret)
return ret;
- mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
- return 0;
-}
-
-static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
-{
- esw_cleanup_vepa_rules(esw);
- esw_destroy_legacy_fdb_table(esw);
- esw_destroy_legacy_vepa_table(esw);
+ ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+ if (ret)
+ esw_destroy_legacy_table(esw);
+ return ret;
}
static void esw_legacy_disable(struct mlx5_eswitch *esw)
@@ -501,7 +494,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
/* Skip mlx5_mpfs_add_mac for eswitch_managers,
* it is already done by its netdev in mlx5e_execute_l2_action
*/
- if (esw->manager_vport == vport)
+ if (mlx5_esw_is_manager_vport(esw, vport))
goto fdb_add;
err = mlx5_mpfs_add_mac(esw->dev, mac);
@@ -530,10 +523,10 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
u16 vport = vaddr->vport;
int err = 0;
- /* Skip mlx5_mpfs_del_mac for eswitch managerss,
+ /* Skip mlx5_mpfs_del_mac for eswitch managers,
* it is already done by its netdev in mlx5e_execute_l2_action
*/
- if (!vaddr->mpfs || esw->manager_vport == vport)
+ if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
goto fdb_del;
err = mlx5_mpfs_del_mac(esw->dev, mac);
@@ -1040,14 +1033,15 @@ out:
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
+ if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
mlx5_del_flow_rules(vport->egress.allowed_vlan);
+ vport->egress.allowed_vlan = NULL;
+ }
- if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
- mlx5_del_flow_rules(vport->egress.drop_rule);
-
- vport->egress.allowed_vlan = NULL;
- vport->egress.drop_rule = NULL;
+ if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) {
+ mlx5_del_flow_rules(vport->egress.legacy.drop_rule);
+ vport->egress.legacy.drop_rule = NULL;
+ }
}
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
@@ -1067,57 +1061,21 @@ void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.acl = NULL;
}
-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int
+esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *root_ns;
- struct mlx5_flow_table *acl;
struct mlx5_flow_group *g;
void *match_criteria;
u32 *flow_group_in;
- /* The ingress acl table contains 4 groups
- * (2 active rules at the same time -
- * 1 allow rule from one of the first 3 groups.
- * 1 drop rule from the last group):
- * 1)Allow untagged traffic with smac=original mac.
- * 2)Allow untagged traffic.
- * 3)Allow traffic with smac=original mac.
- * 4)Drop all other traffic.
- */
- int table_size = 4;
- int err = 0;
-
- if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
- return -EOPNOTSUPP;
-
- if (!IS_ERR_OR_NULL(vport->ingress.acl))
- return 0;
-
- esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
- vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
-
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- mlx5_eswitch_vport_num_to_index(esw, vport->vport));
- if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
- return -EOPNOTSUPP;
- }
+ int err;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
- acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR(acl)) {
- err = PTR_ERR(acl);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
- vport->vport, err);
- goto out;
- }
- vport->ingress.acl = acl;
-
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1127,14 +1085,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto spoof_err;
}
- vport->ingress.allow_untagged_spoofchk_grp = g;
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = g;
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1142,14 +1100,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto untagged_err;
}
- vport->ingress.allow_untagged_only_grp = g;
+ vport->ingress.legacy.allow_untagged_only_grp = g;
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1158,108 +1116,178 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto allow_spoof_err;
}
- vport->ingress.allow_spoofchk_only_grp = g;
+ vport->ingress.legacy.allow_spoofchk_only_grp = g;
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto drop_err;
}
- vport->ingress.drop_grp = g;
+ vport->ingress.legacy.drop_grp = g;
+ kvfree(flow_group_in);
+ return 0;
-out:
- if (err) {
- if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
- mlx5_destroy_flow_group(
- vport->ingress.allow_spoofchk_only_grp);
- if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
- mlx5_destroy_flow_group(
- vport->ingress.allow_untagged_only_grp);
- if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
- mlx5_destroy_flow_group(
- vport->ingress.allow_untagged_spoofchk_grp);
- if (!IS_ERR_OR_NULL(vport->ingress.acl))
- mlx5_destroy_flow_table(vport->ingress.acl);
+drop_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+ vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
}
-
+allow_spoof_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+ vport->ingress.legacy.allow_untagged_only_grp = NULL;
+ }
+untagged_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+ }
+spoof_err:
kvfree(flow_group_in);
return err;
}
+int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport, int table_size)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *acl;
+ int vport_index;
+ int err;
+
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
+ vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+ vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ vport_index);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n",
+ vport->vport);
+ return -EOPNOTSUPP;
+ }
+
+ acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+ if (IS_ERR(acl)) {
+ err = PTR_ERR(acl);
+ esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n",
+ vport->vport, err);
+ return err;
+ }
+ vport->ingress.acl = acl;
+ return 0;
+}
+
+void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport)
+{
+ if (!vport->ingress.acl)
+ return;
+
+ mlx5_destroy_flow_table(vport->ingress.acl);
+ vport->ingress.acl = NULL;
+}
+
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
- mlx5_del_flow_rules(vport->ingress.drop_rule);
+ if (vport->ingress.legacy.drop_rule) {
+ mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
+ vport->ingress.legacy.drop_rule = NULL;
+ }
- if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
+ if (vport->ingress.allow_rule) {
mlx5_del_flow_rules(vport->ingress.allow_rule);
-
- vport->ingress.drop_rule = NULL;
- vport->ingress.allow_rule = NULL;
-
- esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+ vport->ingress.allow_rule = NULL;
+ }
}
-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- if (IS_ERR_OR_NULL(vport->ingress.acl))
+ if (!vport->ingress.acl)
return;
esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
esw_vport_cleanup_ingress_rules(esw, vport);
- mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
- mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
- mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
- mlx5_destroy_flow_group(vport->ingress.drop_grp);
- mlx5_destroy_flow_table(vport->ingress.acl);
- vport->ingress.acl = NULL;
- vport->ingress.drop_grp = NULL;
- vport->ingress.allow_spoofchk_only_grp = NULL;
- vport->ingress.allow_untagged_only_grp = NULL;
- vport->ingress.allow_untagged_spoofchk_grp = NULL;
+ if (vport->ingress.legacy.allow_spoofchk_only_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+ vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
+ }
+ if (vport->ingress.legacy.allow_untagged_only_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+ vport->ingress.legacy.allow_untagged_only_grp = NULL;
+ }
+ if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+ }
+ if (vport->ingress.legacy.drop_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
+ vport->ingress.legacy.drop_grp = NULL;
+ }
+ esw_vport_destroy_ingress_acl_table(vport);
}
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- struct mlx5_fc *counter = vport->ingress.drop_counter;
+ struct mlx5_fc *counter = vport->ingress.legacy.drop_counter;
struct mlx5_flow_destination drop_ctr_dst = {0};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_spec *spec;
+ struct mlx5_flow_spec *spec = NULL;
int dest_num = 0;
int err = 0;
u8 *smac_v;
+ /* The ingress acl table contains 4 groups
+ * (2 active rules at the same time -
+ * 1 allow rule from one of the first 3 groups.
+ * 1 drop rule from the last group):
+ * 1)Allow untagged traffic with smac=original mac.
+ * 2)Allow untagged traffic.
+ * 3)Allow traffic with smac=original mac.
+ * 4)Drop all other traffic.
+ */
+ int table_size = 4;
+
esw_vport_cleanup_ingress_rules(esw, vport);
if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
- esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
return 0;
}
- err = esw_vport_enable_ingress_acl(esw, vport);
- if (err) {
- mlx5_core_warn(esw->dev,
- "failed to enable ingress acl (%d) on vport[%d]\n",
- err, vport->vport);
- return err;
+ if (!vport->ingress.acl) {
+ err = esw_vport_create_ingress_acl_table(esw, vport, table_size);
+ if (err) {
+ esw_warn(esw->dev,
+ "vport[%d] enable ingress acl err (%d)\n",
+				 vport->vport, err);
+ return err;
+ }
+
+ err = esw_vport_create_legacy_ingress_acl_groups(esw, vport);
+ if (err)
+ goto out;
}
esw_debug(esw->dev,
@@ -1309,21 +1337,59 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
dst = &drop_ctr_dst;
dest_num++;
}
- vport->ingress.drop_rule =
+ vport->ingress.legacy.drop_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
&flow_act, dst, dest_num);
- if (IS_ERR(vport->ingress.drop_rule)) {
- err = PTR_ERR(vport->ingress.drop_rule);
+ if (IS_ERR(vport->ingress.legacy.drop_rule)) {
+ err = PTR_ERR(vport->ingress.legacy.drop_rule);
esw_warn(esw->dev,
"vport[%d] configure ingress drop rule, err(%d)\n",
vport->vport, err);
- vport->ingress.drop_rule = NULL;
+ vport->ingress.legacy.drop_rule = NULL;
goto out;
}
+ kvfree(spec);
+ return 0;
out:
- if (err)
- esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
+ kvfree(spec);
+ return err;
+}
+
+int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ u16 vlan_id, u32 flow_action)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ if (vport->egress.allowed_vlan)
+ return -EEXIST;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = flow_action;
+ vport->egress.allowed_vlan =
+ mlx5_add_flow_rules(vport->egress.acl, spec,
+ &flow_act, NULL, 0);
+ if (IS_ERR(vport->egress.allowed_vlan)) {
+ err = PTR_ERR(vport->egress.allowed_vlan);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress vlan rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.allowed_vlan = NULL;
+ }
+
kvfree(spec);
return err;
}
@@ -1331,7 +1397,7 @@ out:
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- struct mlx5_fc *counter = vport->egress.drop_counter;
+ struct mlx5_fc *counter = vport->egress.legacy.drop_counter;
struct mlx5_flow_destination drop_ctr_dst = {0};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
@@ -1358,34 +1424,17 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- err = -ENOMEM;
- goto out;
- }
-
/* Allowed vlan rule */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
+ err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+ if (err)
+ return err;
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- vport->egress.allowed_vlan =
- mlx5_add_flow_rules(vport->egress.acl, spec,
- &flow_act, NULL, 0);
- if (IS_ERR(vport->egress.allowed_vlan)) {
- err = PTR_ERR(vport->egress.allowed_vlan);
- esw_warn(esw->dev,
- "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
- vport->vport, err);
- vport->egress.allowed_vlan = NULL;
+ /* Drop others rule (star rule) */
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+	if (!spec) {
+		err = -ENOMEM;
		goto out;
+	}
- }
- /* Drop others rule (star rule) */
- memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach egress drop flow counter */
@@ -1396,15 +1445,15 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
dst = &drop_ctr_dst;
dest_num++;
}
- vport->egress.drop_rule =
+ vport->egress.legacy.drop_rule =
mlx5_add_flow_rules(vport->egress.acl, spec,
&flow_act, dst, dest_num);
- if (IS_ERR(vport->egress.drop_rule)) {
- err = PTR_ERR(vport->egress.drop_rule);
+ if (IS_ERR(vport->egress.legacy.drop_rule)) {
+ err = PTR_ERR(vport->egress.legacy.drop_rule);
esw_warn(esw->dev,
"vport[%d] configure egress drop rule failed, err(%d)\n",
vport->vport, err);
- vport->egress.drop_rule = NULL;
+ vport->egress.legacy.drop_rule = NULL;
}
out:
kvfree(spec);
@@ -1619,7 +1668,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
u16 vport_num = vport->vport;
int flags;
- if (esw->manager_vport == vport_num)
+ if (mlx5_esw_is_manager_vport(esw, vport_num))
return;
mlx5_modify_vport_admin_state(esw->dev,
@@ -1639,66 +1688,112 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
flags);
-
- /* Only legacy mode needs ACLs */
- if (esw->mode == MLX5_ESWITCH_LEGACY) {
- esw_vport_ingress_config(esw, vport);
- esw_vport_egress_config(esw, vport);
- }
}
-static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
+static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_core_dev *dev = vport->dev;
+ int ret;
- if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
- vport->ingress.drop_counter = mlx5_fc_create(dev, false);
- if (IS_ERR(vport->ingress.drop_counter)) {
- esw_warn(dev,
+	/* Only non-manager vports need ACLs in legacy mode */
+ if (mlx5_esw_is_manager_vport(esw, vport->vport))
+ return 0;
+
+ if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
+ vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
+ if (IS_ERR(vport->ingress.legacy.drop_counter)) {
+ esw_warn(esw->dev,
"vport[%d] configure ingress drop rule counter failed\n",
vport->vport);
- vport->ingress.drop_counter = NULL;
+ vport->ingress.legacy.drop_counter = NULL;
}
}
- if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
- vport->egress.drop_counter = mlx5_fc_create(dev, false);
- if (IS_ERR(vport->egress.drop_counter)) {
- esw_warn(dev,
+ ret = esw_vport_ingress_config(esw, vport);
+ if (ret)
+ goto ingress_err;
+
+ if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
+ vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
+ if (IS_ERR(vport->egress.legacy.drop_counter)) {
+ esw_warn(esw->dev,
"vport[%d] configure egress drop rule counter failed\n",
vport->vport);
- vport->egress.drop_counter = NULL;
+ vport->egress.legacy.drop_counter = NULL;
}
}
+
+ ret = esw_vport_egress_config(esw, vport);
+ if (ret)
+ goto egress_err;
+
+ return 0;
+
+egress_err:
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
+ mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
+ vport->egress.legacy.drop_counter = NULL;
+
+ingress_err:
+ mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
+ vport->ingress.legacy.drop_counter = NULL;
+ return ret;
}
-static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
+static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_core_dev *dev = vport->dev;
+ if (esw->mode == MLX5_ESWITCH_LEGACY)
+ return esw_vport_create_legacy_acl_tables(esw, vport);
+ else
+ return esw_vport_create_offloads_acl_tables(esw, vport);
+}
+
+static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
- if (vport->ingress.drop_counter)
- mlx5_fc_destroy(dev, vport->ingress.drop_counter);
- if (vport->egress.drop_counter)
- mlx5_fc_destroy(dev, vport->egress.drop_counter);
+{
+ if (mlx5_esw_is_manager_vport(esw, vport->vport))
+ return;
+
+ esw_vport_disable_egress_acl(esw, vport);
+ mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
+ vport->egress.legacy.drop_counter = NULL;
+
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
+ mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
+ vport->ingress.legacy.drop_counter = NULL;
+}
+
+static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (esw->mode == MLX5_ESWITCH_LEGACY)
+ esw_vport_destroy_legacy_acl_tables(esw, vport);
+ else
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
}
-static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- enum mlx5_eswitch_vport_event enabled_events)
+static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ enum mlx5_eswitch_vport_event enabled_events)
{
u16 vport_num = vport->vport;
+ int ret;
mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
- /* Create steering drop counters for ingress and egress ACLs */
- if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY)
- esw_vport_create_drop_counters(vport);
-
/* Restore old vport configuration */
esw_apply_vport_conf(esw, vport);
+ ret = esw_vport_setup_acl(esw, vport);
+ if (ret)
+ goto done;
+
/* Attach vport to the eswitch rate limiter */
if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
vport->qos.bw_share))
@@ -1711,7 +1806,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
* in smartNIC as it's a vport group manager.
*/
- if (esw->manager_vport == vport_num ||
+ if (mlx5_esw_is_manager_vport(esw, vport_num) ||
(!vport_num && mlx5_core_is_ecpf(esw->dev)))
vport->info.trusted = true;
@@ -1719,7 +1814,9 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw->enabled_vports++;
esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
+done:
mutex_unlock(&esw->state_lock);
+ return ret;
}
static void esw_disable_vport(struct mlx5_eswitch *esw,
@@ -1727,18 +1824,16 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
{
u16 vport_num = vport->vport;
+ mutex_lock(&esw->state_lock);
if (!vport->enabled)
- return;
+ goto done;
esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
/* Mark this vport as disabled to discard new events */
vport->enabled = false;
- /* Wait for current already scheduled events to complete */
- flush_workqueue(esw->work_queue);
/* Disable events from this vport */
arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
- mutex_lock(&esw->state_lock);
/* We don't assume VFs will cleanup after themselves.
* Calling vport change handler while vport is disabled will cleanup
* the vport resources.
@@ -1746,17 +1841,18 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
esw_vport_disable_qos(esw, vport);
- if (esw->manager_vport != vport_num &&
- esw->mode == MLX5_ESWITCH_LEGACY) {
+
+ if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ esw->mode == MLX5_ESWITCH_LEGACY)
mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num, 1,
MLX5_VPORT_ADMIN_STATE_DOWN);
- esw_vport_disable_egress_acl(esw, vport);
- esw_vport_disable_ingress_acl(esw, vport);
- esw_vport_destroy_drop_counters(vport);
- }
+
+ esw_vport_cleanup_acl(esw, vport);
esw->enabled_vports--;
+
+done:
mutex_unlock(&esw->state_lock);
}
@@ -1770,12 +1866,8 @@ static int eswitch_vport_event(struct notifier_block *nb,
vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return NOTIFY_OK;
-
- if (vport->enabled)
+ if (!IS_ERR(vport))
queue_work(esw->work_queue, &vport->vport_change_handler);
-
return NOTIFY_OK;
}
@@ -1846,26 +1938,51 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
* whichever are present on the eswitch.
*/
-void
+int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
+ int num_vfs;
+ int ret;
int i;
/* Enable PF vport */
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- esw_enable_vport(esw, vport, enabled_events);
+ ret = esw_enable_vport(esw, vport, enabled_events);
+ if (ret)
+ return ret;
- /* Enable ECPF vports */
+ /* Enable ECPF vport */
if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- esw_enable_vport(esw, vport, enabled_events);
+ ret = esw_enable_vport(esw, vport, enabled_events);
+ if (ret)
+ goto ecpf_err;
}
/* Enable VF vports */
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
- esw_enable_vport(esw, vport, enabled_events);
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+ ret = esw_enable_vport(esw, vport, enabled_events);
+ if (ret)
+ goto vf_err;
+ }
+ return 0;
+
+vf_err:
+ num_vfs = i - 1;
+ mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, num_vfs)
+ esw_disable_vport(esw, vport);
+
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ esw_disable_vport(esw, vport);
+ }
+
+ecpf_err:
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ esw_disable_vport(esw, vport);
+ return ret;
}
/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs
@@ -2485,12 +2602,12 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
return 0;
- if (vport->egress.drop_counter)
- mlx5_fc_query(dev, vport->egress.drop_counter,
+ if (vport->egress.legacy.drop_counter)
+ mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
&stats->rx_dropped, &bytes);
- if (vport->ingress.drop_counter)
- mlx5_fc_query(dev, vport->ingress.drop_counter,
+ if (vport->ingress.legacy.drop_counter)
+ mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
&stats->tx_dropped, &bytes);
if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
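
The thread running through the eswitch.c hunks is that enabling a vport can now fail, so mlx5_eswitch_enable_pf_vf_vports() returns an error and rolls back partial progress in reverse order (VFs, then ECPF, then PF). The unwind shape, reduced to a standalone sketch:

	#include <stdio.h>

	#define NUM_VPORTS 4

	static int enable_vport(int i)
	{
		return i == 2 ? -1 : 0;	/* simulate a failure on vport 2 */
	}

	static void disable_vport(int i)
	{
		printf("unwound vport %d\n", i);
	}

	static int enable_all_vports(void)
	{
		int i, ret;

		for (i = 0; i < NUM_VPORTS; i++) {
			ret = enable_vport(i);
			if (ret)
				goto err;
		}
		return 0;

	err:
		while (--i >= 0)	/* roll back only what succeeded */
			disable_vport(i);
		return ret;
	}

	int main(void)
	{
		return enable_all_vports() ? 1 : 0;
	}
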
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 804a7ed2b969..962888a7c3c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -43,6 +43,16 @@
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
+#define FDB_TC_MAX_CHAIN 3
+#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
+#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)
+
+/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
+#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
+
+#define FDB_TC_MAX_PRIO 16
+#define FDB_TC_LEVELS_PER_PRIO 2
+
#ifdef CONFIG_MLX5_ESWITCH
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -59,21 +69,22 @@
#define mlx5_esw_has_fwd_fdb(dev) \
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)
-#define FDB_MAX_CHAIN 3
-#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
-#define FDB_MAX_PRIO 16
-
struct vport_ingress {
struct mlx5_flow_table *acl;
- struct mlx5_flow_group *allow_untagged_spoofchk_grp;
- struct mlx5_flow_group *allow_spoofchk_only_grp;
- struct mlx5_flow_group *allow_untagged_only_grp;
- struct mlx5_flow_group *drop_grp;
- struct mlx5_modify_hdr *modify_metadata;
- struct mlx5_flow_handle *modify_metadata_rule;
- struct mlx5_flow_handle *allow_rule;
- struct mlx5_flow_handle *drop_rule;
- struct mlx5_fc *drop_counter;
+ struct mlx5_flow_handle *allow_rule;
+ struct {
+ struct mlx5_flow_group *allow_spoofchk_only_grp;
+ struct mlx5_flow_group *allow_untagged_spoofchk_grp;
+ struct mlx5_flow_group *allow_untagged_only_grp;
+ struct mlx5_flow_group *drop_grp;
+ struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
+ } legacy;
+ struct {
+ struct mlx5_flow_group *metadata_grp;
+ struct mlx5_modify_hdr *modify_metadata;
+ struct mlx5_flow_handle *modify_metadata_rule;
+ } offloads;
};
struct vport_egress {
@@ -81,8 +92,10 @@ struct vport_egress {
struct mlx5_flow_group *allowed_vlans_grp;
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allowed_vlan;
- struct mlx5_flow_handle *drop_rule;
- struct mlx5_fc *drop_counter;
+ struct {
+ struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
+ } legacy;
};
struct mlx5_vport_drop_stats {
@@ -139,7 +152,6 @@ enum offloads_fdb_flags {
extern const unsigned int ESW_POOLS[4];
-#define PRIO_LEVELS 2
struct mlx5_eswitch_fdb {
union {
struct legacy_fdb {
@@ -166,7 +178,7 @@ struct mlx5_eswitch_fdb {
struct {
struct mlx5_flow_table *fdb;
u32 num_rules;
- } fdb_prio[FDB_MAX_CHAIN + 1][FDB_MAX_PRIO + 1][PRIO_LEVELS];
+ } fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO];
/* Protects fdb_prio table */
struct mutex fdb_prio_lock;
@@ -217,8 +229,8 @@ enum {
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
- /* legacy data structures */
struct mlx5_eswitch_fdb fdb_table;
+ /* legacy data structures */
struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
struct esw_mc_addr mc_promisc;
/* end of legacy */
@@ -251,18 +263,16 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
+int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ int table_size);
+void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport);
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
-void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
u32 rate_mbps);
@@ -292,9 +302,11 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
-int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+ bool other_vport,
void *in, int inlen);
-int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+ bool other_vport,
void *out, int outlen);
struct mlx5_flow_spec;
@@ -421,6 +433,10 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
+int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ u16 vlan_id, u32 flow_action);
+
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
u8 vlan_depth)
{
@@ -459,6 +475,12 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}
+static inline bool
+mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return esw->manager_vport == vport_num;
+}
+
static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
return mlx5_core_is_ecpf_esw_manager(dev) ?
@@ -593,11 +615,18 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
-void
+int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
+int
+esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void
+esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -613,10 +642,6 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
-#define FDB_MAX_CHAIN 1
-#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
-#define FDB_MAX_PRIO 1
-
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
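
Spelling out the renamed chain constants from the hunk above: TC owns chains 0..FDB_TC_MAX_CHAIN, the nftables/FT offload chain sits just past them, and the slow-path chain stays outside FDB_NUM_CHAINS because esw_get_prio_table() short-circuits it straight to the slow FDB rather than the fdb_prio table:

	#define FDB_TC_MAX_CHAIN	3			/* TC chains 0..3 */
	#define FDB_FT_CHAIN		(FDB_TC_MAX_CHAIN + 1)	/* 4: nft/FT offload */
	#define FDB_TC_SLOW_PATH_CHAIN	(FDB_FT_CHAIN + 1)	/* 5: slow path */
	#define FDB_NUM_CHAINS		(FDB_FT_CHAIN + 1)	/* 5 chains: 0..4 */

so fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO] covers exactly the TC chains plus the FT chain.
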
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 2276bb183705..8ba59a21a163 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -75,7 +75,7 @@ bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
- return FDB_MAX_CHAIN;
+ return FDB_TC_MAX_CHAIN;
return 0;
}
@@ -83,7 +83,7 @@ u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
- return FDB_MAX_PRIO;
+ return FDB_TC_MAX_PRIO;
return 1;
}
@@ -599,7 +599,7 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0;
- err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
+ err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
out, sizeof(out));
if (err)
return err;
@@ -618,7 +618,7 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
MLX5_SET(modify_esw_vport_context_in, in,
field_select.fdb_to_vport_reg_c_id, 1);
- return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
+ return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false,
in, sizeof(in));
}
@@ -927,7 +927,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
int table_prio, l = 0;
u32 flags = 0;
- if (chain == FDB_SLOW_PATH_CHAIN)
+ if (chain == FDB_TC_SLOW_PATH_CHAIN)
return esw->fdb_table.offloads.slow_fdb;
mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
@@ -952,7 +952,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- table_prio = (chain * FDB_MAX_PRIO) + prio - 1;
+ table_prio = prio - 1;
/* create earlier levels for correct fs_core lookup when
* connecting tables
@@ -989,7 +989,7 @@ esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
int l;
- if (chain == FDB_SLOW_PATH_CHAIN)
+ if (chain == FDB_TC_SLOW_PATH_CHAIN)
return;
mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
@@ -1079,7 +1079,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
MLX5_CAP_GEN(dev, max_flow_counter_15_0);
fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
- esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
+ esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n",
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
fdb_max);
@@ -1777,9 +1777,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
flow_act.vlan[0].vid = 0;
flow_act.vlan[0].prio = 0;
- if (vport->ingress.modify_metadata_rule) {
+ if (vport->ingress.offloads.modify_metadata_rule) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- flow_act.modify_hdr = vport->ingress.modify_metadata;
+ flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
}
vport->ingress.allow_rule =
@@ -1815,11 +1815,11 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
MLX5_SET(set_action_in, action, data,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
- vport->ingress.modify_metadata =
+ vport->ingress.offloads.modify_metadata =
mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1, action);
- if (IS_ERR(vport->ingress.modify_metadata)) {
- err = PTR_ERR(vport->ingress.modify_metadata);
+ if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
+ err = PTR_ERR(vport->ingress.offloads.modify_metadata);
esw_warn(esw->dev,
"failed to alloc modify header for vport %d ingress acl (%d)\n",
vport->vport, err);
@@ -1827,100 +1827,76 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- flow_act.modify_hdr = vport->ingress.modify_metadata;
- vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl,
- &spec, &flow_act, NULL, 0);
- if (IS_ERR(vport->ingress.modify_metadata_rule)) {
- err = PTR_ERR(vport->ingress.modify_metadata_rule);
+ flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
+ vport->ingress.offloads.modify_metadata_rule =
+ mlx5_add_flow_rules(vport->ingress.acl,
+ &spec, &flow_act, NULL, 0);
+ if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
+ err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
esw_warn(esw->dev,
"failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
vport->vport, err);
- vport->ingress.modify_metadata_rule = NULL;
+ vport->ingress.offloads.modify_metadata_rule = NULL;
goto out;
}
out:
if (err)
- mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
return err;
}
-void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- if (vport->ingress.modify_metadata_rule) {
- mlx5_del_flow_rules(vport->ingress.modify_metadata_rule);
- mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
+ if (vport->ingress.offloads.modify_metadata_rule) {
+ mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
- vport->ingress.modify_metadata_rule = NULL;
+ vport->ingress.offloads.modify_metadata_rule = NULL;
}
}
-static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_spec *spec;
- int err = 0;
-
- if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
- return 0;
-
- /* For prio tag mode, there is only 1 FTEs:
- * 1) prio tag packets - pop the prio tag VLAN, allow
- * Unmatched traffic is allowed by default
- */
-
- esw_vport_cleanup_egress_rules(esw, vport);
-
- err = esw_vport_enable_egress_acl(esw, vport);
- if (err) {
- mlx5_core_warn(esw->dev,
- "failed to enable egress acl (%d) on vport[%d]\n",
- err, vport->vport);
- return err;
- }
-
- esw_debug(esw->dev,
- "vport[%d] configure prio tag egress rules\n", vport->vport);
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int ret = 0;
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- err = -ENOMEM;
- goto out_no_mem;
- }
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
- /* prio tag vlan rule - pop it so VF receives untagged packets */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
- MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- vport->egress.allowed_vlan =
- mlx5_add_flow_rules(vport->egress.acl, spec,
- &flow_act, NULL, 0);
- if (IS_ERR(vport->egress.allowed_vlan)) {
- err = PTR_ERR(vport->egress.allowed_vlan);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ ret = PTR_ERR(g);
esw_warn(esw->dev,
- "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
- vport->vport, err);
- vport->egress.allowed_vlan = NULL;
- goto out;
+ "Failed to create vport[%d] ingress metadata group, err(%d)\n",
+ vport->vport, ret);
+ goto grp_err;
}
+ vport->ingress.offloads.metadata_grp = g;
+grp_err:
+ kvfree(flow_group_in);
+ return ret;
+}
-out:
- kvfree(spec);
-out_no_mem:
- if (err)
- esw_vport_cleanup_egress_rules(esw, vport);
- return err;
+static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
+{
+ if (vport->ingress.offloads.metadata_grp) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp);
+ vport->ingress.offloads.metadata_grp = NULL;
+ }
}
-static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int err;
@@ -1929,8 +1905,7 @@ static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
return 0;
esw_vport_cleanup_ingress_rules(esw, vport);
-
- err = esw_vport_enable_ingress_acl(esw, vport);
+ err = esw_vport_create_ingress_acl_table(esw, vport, 1);
if (err) {
esw_warn(esw->dev,
"failed to enable ingress acl (%d) on vport[%d]\n",
@@ -1938,25 +1913,65 @@ static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
return err;
}
+ err = esw_vport_create_ingress_acl_group(esw, vport);
+ if (err)
+ goto group_err;
+
esw_debug(esw->dev,
"vport[%d] configure ingress rules\n", vport->vport);
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
if (err)
- goto out;
+ goto metadata_err;
}
if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
err = esw_vport_ingress_prio_tag_config(esw, vport);
if (err)
- goto out;
+ goto prio_tag_err;
}
+ return 0;
-out:
+prio_tag_err:
+ esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+metadata_err:
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_destroy_ingress_acl_group(vport);
+group_err:
+ esw_vport_destroy_ingress_acl_table(vport);
+ return err;
+}
+
+static int esw_vport_egress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int err;
+
+ if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
+ return 0;
+
+ esw_vport_cleanup_egress_rules(esw, vport);
+
+ err = esw_vport_enable_egress_acl(esw, vport);
+ if (err)
+ return err;
+
+ /* For prio tag mode, there is only 1 FTE:
+ * 1) prio tag packets - pop the prio tag VLAN, allow
+ * Unmatched traffic is allowed by default
+ */
+ esw_debug(esw->dev,
+ "vport[%d] configure prio tag egress rules\n", vport->vport);
+
+ /* prio tag vlan rule - pop it so VF receives untagged packets */
+ err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
if (err)
- esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_disable_egress_acl(esw, vport);
+
return err;
}
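
The reworked esw_vport_ingress_config() above unwinds a partially built configuration in reverse order through labeled gotos, one label per completed step. A minimal standalone sketch of that unwind pattern, with hypothetical step_*()/undo_*() helpers standing in for the mlx5 table/group/rule calls:

/* Labeled-unwind sketch: each failed step jumps to the label that
 * undoes everything completed before it. The helpers are stand-ins,
 * not mlx5 APIs; step_rules() fails on purpose to show the unwind.
 */
#include <stdio.h>

static int step_table(void)  { return 0; }
static int step_group(void)  { return 0; }
static int step_rules(void)  { return -1; }
static void undo_group(void) { puts("undo group"); }
static void undo_table(void) { puts("undo table"); }

static int configure(void)
{
	int err;

	err = step_table();
	if (err)
		return err;

	err = step_group();
	if (err)
		goto group_err;

	err = step_rules();
	if (err)
		goto rules_err;

	return 0;

rules_err:
	undo_group();
group_err:
	undo_table();
	return err;
}

int main(void)
{
	printf("configure() = %d\n", configure());
	return 0;
}
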
@@ -1980,54 +1995,59 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
return true;
}
-static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
+int
+esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_vport *vport;
- int i, j;
int err;
- if (esw_check_vport_match_metadata_supported(esw))
- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-
- mlx5_esw_for_all_vports(esw, i, vport) {
- err = esw_vport_ingress_common_config(esw, vport);
- if (err)
- goto err_ingress;
+ err = esw_vport_ingress_config(esw, vport);
+ if (err)
+ return err;
- if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
- err = esw_vport_egress_prio_tag_config(esw, vport);
- if (err)
- goto err_egress;
+ if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
+ err = esw_vport_egress_config(esw, vport);
+ if (err) {
+ esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_destroy_ingress_acl_table(vport);
}
}
+ return err;
+}
- if (mlx5_eswitch_vport_match_metadata_enabled(esw))
- esw_info(esw->dev, "Use metadata reg_c as source vport to match\n");
+void
+esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ esw_vport_disable_egress_acl(esw, vport);
+ esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_destroy_ingress_acl_group(vport);
+ esw_vport_destroy_ingress_acl_table(vport);
+}
- return 0;
+static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int err;
-err_egress:
- esw_vport_disable_ingress_acl(esw, vport);
-err_ingress:
- for (j = MLX5_VPORT_PF; j < i; j++) {
- vport = &esw->vports[j];
- esw_vport_disable_egress_acl(esw, vport);
- esw_vport_disable_ingress_acl(esw, vport);
- }
+ if (esw_check_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ err = esw_vport_create_offloads_acl_tables(esw, vport);
+ if (err)
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
return err;
}
-static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
+static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
- int i;
-
- mlx5_esw_for_all_vports(esw, i, vport) {
- esw_vport_disable_egress_acl(esw, vport);
- esw_vport_disable_ingress_acl(esw, vport);
- }
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
@@ -2045,7 +2065,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
- err = esw_create_offloads_acl_tables(esw);
+ err = esw_create_uplink_offloads_acl_tables(esw);
if (err)
return err;
@@ -2070,7 +2090,7 @@ create_ft_err:
esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
- esw_destroy_offloads_acl_tables(esw);
+ esw_destroy_uplink_offloads_acl_tables(esw);
return err;
}
@@ -2080,7 +2100,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);
- esw_destroy_offloads_acl_tables(esw);
+ esw_destroy_uplink_offloads_acl_tables(esw);
}
static void
@@ -2169,7 +2189,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_vport_metadata;
- mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
+ err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
+ if (err)
+ goto err_vports;
err = esw_offloads_load_all_reps(esw);
if (err)
@@ -2182,6 +2204,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
err_reps:
mlx5_eswitch_disable_pf_vf_vports(esw);
+err_vports:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_steering_cleanup(esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 7879e1746297..366bda1bb1c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -183,7 +183,8 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
u32 port_mask, port_value;
if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
- return spec->flow_context.flow_source == MLX5_VPORT_UPLINK;
+ return spec->flow_context.flow_source ==
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
misc_parameters.source_port);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0246f5cdd355..2c1dcb5084ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2400,9 +2400,17 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
int acc_level_ns = acc_level;
prio->start_level = acc_level;
- fs_for_each_ns(ns, prio)
+ fs_for_each_ns(ns, prio) {
/* This updates start_level and num_levels of ns's priority descendants */
acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
+
+ /* If this is a prio with chains, and we can jump from one chain
+ * (namespace) to another, we accumulate the levels
+ */
+ if (prio->node.type == FS_TYPE_PRIO_CHAINS)
+ acc_level = acc_level_ns;
+ }
+
if (!prio->num_levels)
prio->num_levels = acc_level_ns - prio->start_level;
WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
@@ -2591,58 +2599,109 @@ out_err:
steering->rdma_rx_root_ns = NULL;
return err;
}
-static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
+
+/* FT and tc chains are stored in the same array so we can re-use the
+ * mlx5_get_fdb_sub_ns() and tc API for FT chains.
+ * When creating a new ns for each chain, store it in the first available slot.
+ * Assume tc chains are created and stored first, and only then the FT chain.
+ */
+static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
+ struct mlx5_flow_namespace *ns)
+{
+ int chain = 0;
+
+ while (steering->fdb_sub_ns[chain])
+ ++chain;
+
+ steering->fdb_sub_ns[chain] = ns;
+}
+
+static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
+ struct fs_prio *maj_prio)
{
struct mlx5_flow_namespace *ns;
- struct fs_prio *maj_prio;
struct fs_prio *min_prio;
+ int prio;
+
+ ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
+
+ for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
+ min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
+ if (IS_ERR(min_prio))
+ return PTR_ERR(min_prio);
+ }
+
+ store_fdb_sub_ns_prio_chain(steering, ns);
+
+ return 0;
+}
+
+static int create_fdb_chains(struct mlx5_flow_steering *steering,
+ int fs_prio,
+ int chains)
+{
+ struct fs_prio *maj_prio;
int levels;
int chain;
- int prio;
int err;
- steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
- if (!steering->fdb_root_ns)
- return -ENOMEM;
+ levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
+ maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
+ fs_prio,
+ levels);
+ if (IS_ERR(maj_prio))
+ return PTR_ERR(maj_prio);
+
+ for (chain = 0; chain < chains; chain++) {
+ err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
- steering->fdb_sub_ns = kzalloc(sizeof(steering->fdb_sub_ns) *
- (FDB_MAX_CHAIN + 1), GFP_KERNEL);
+static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
+{
+ int err;
+
+ steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
+ sizeof(*steering->fdb_sub_ns),
+ GFP_KERNEL);
if (!steering->fdb_sub_ns)
return -ENOMEM;
+ err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
+ if (err)
+ return err;
+
+ err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *maj_prio;
+ int err;
+
+ steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
+ if (!steering->fdb_root_ns)
+ return -ENOMEM;
+
maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
1);
if (IS_ERR(maj_prio)) {
err = PTR_ERR(maj_prio);
goto out_err;
}
-
- levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1);
- maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
- FDB_FAST_PATH,
- levels);
- if (IS_ERR(maj_prio)) {
- err = PTR_ERR(maj_prio);
+ err = create_fdb_fast_path(steering);
+ if (err)
goto out_err;
- }
-
- for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
- ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
- if (IS_ERR(ns)) {
- err = PTR_ERR(ns);
- goto out_err;
- }
-
- for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) {
- min_prio = fs_create_prio(ns, prio, 2);
- if (IS_ERR(min_prio)) {
- err = PTR_ERR(min_prio);
- goto out_err;
- }
- }
-
- steering->fdb_sub_ns[chain] = ns;
- }
maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
if (IS_ERR(maj_prio)) {
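
The fs_core.c refactor above splits FDB fast-path creation into create_fdb_chains(), which reserves FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains levels for a chained prio, and store_fdb_sub_ns_prio_chain(), which drops each new namespace into the first free slot. A standalone sketch of both pieces, using illustrative constants rather than the real FDB_TC_* values:

/* First-free-slot storage plus the level budget for a chained prio.
 * Constants mirror the FDB_TC_* names; the values are illustrative.
 */
#include <assert.h>

#define TC_LEVELS_PER_PRIO 2
#define TC_MAX_PRIO        16
#define NUM_CHAINS         (3 + 1 + 1) /* tc chains 0..3 plus one FT chain */

static void *sub_ns[NUM_CHAINS];

static void store_sub_ns(void *ns)
{
	int chain = 0;

	/* Slots are filled in order, so the first NULL entry is free. */
	while (sub_ns[chain])
		++chain;
	sub_ns[chain] = ns;
}

int main(void)
{
	int dummy[NUM_CHAINS];

	for (int i = 0; i < NUM_CHAINS; i++)
		store_sub_ns(&dummy[i]);
	assert(sub_ns[NUM_CHAINS - 1] == &dummy[NUM_CHAINS - 1]);

	/* A chained prio must reserve levels for every prio of every chain. */
	int levels = TC_LEVELS_PER_PRIO * TC_MAX_PRIO * NUM_CHAINS;
	assert(levels == 2 * 16 * 5);
	return 0;
}
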
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index e718170a80c3..d9f4e8c59c1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -555,7 +555,6 @@ mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
return mlx5_health_try_recover(dev);
}
-#define MLX5_CR_DUMP_CHUNK_SIZE 256
static int
mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg, void *priv_ctx,
@@ -564,8 +563,6 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
u32 crdump_size = dev->priv.health.crdump_size;
u32 *cr_data;
- u32 data_size;
- u32 offset;
int err;
if (!mlx5_core_is_pf(dev))
@@ -586,20 +583,7 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
goto free_data;
}
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "crdump_data");
- if (err)
- goto free_data;
- for (offset = 0; offset < crdump_size; offset += data_size) {
- if (crdump_size - offset < MLX5_CR_DUMP_CHUNK_SIZE)
- data_size = crdump_size - offset;
- else
- data_size = MLX5_CR_DUMP_CHUNK_SIZE;
- err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset,
- data_size);
- if (err)
- goto free_data;
- }
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ err = devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size);
free_data:
kvfree(cr_data);
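
The health.c hunk above deletes a manual loop that emitted the crdump in MLX5_CR_DUMP_CHUNK_SIZE pieces, since devlink_fmsg_binary_pair_put() now handles the splitting internally. A standalone sketch of the retired chunking pattern, with dump_chunk() as a hypothetical stand-in for devlink_fmsg_binary_put():

/* Split a buffer into fixed-size chunks, with a short tail chunk. */
#include <stdint.h>
#include <stdio.h>

#define CHUNK_SIZE 256

static void dump_chunk(const uint8_t *data, size_t len)
{
	/* stand-in for the per-chunk devlink_fmsg_binary_put() call */
	printf("chunk of %zu bytes (first byte 0x%02x)\n", len, data[0]);
}

static void dump_all(const uint8_t *data, size_t size)
{
	size_t offset, n;

	for (offset = 0; offset < size; offset += n) {
		n = size - offset < CHUNK_SIZE ? size - offset : CHUNK_SIZE;
		dump_chunk(data + offset, n);
	}
}

int main(void)
{
	uint8_t buf[600] = {0};

	dump_all(buf, sizeof(buf)); /* emits 256 + 256 + 88 bytes */
	return 0;
}
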
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 0059b290e095..43f97601b500 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -236,6 +236,19 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests to enable time stamping on both edges. */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
if (rq->extts.index >= clock->ptp_info.n_pins)
return -EINVAL;
@@ -290,6 +303,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
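
Both PTP hooks above validate the request flags before touching hardware: anything outside the supported mask is rejected, as is a strict request to timestamp both edges at once. A standalone sketch of that check, with illustrative flag values rather than the PTP_* UAPI bits:

/* Flag validation: unknown bits and the strict both-edges combination
 * are refused up front. Flag values here are illustrative only.
 */
#include <assert.h>

#define F_ENABLE  (1 << 0)
#define F_RISING  (1 << 1)
#define F_FALLING (1 << 2)
#define F_STRICT  (1 << 3)
#define F_EDGES   (F_RISING | F_FALLING)
#define SUPPORTED (F_ENABLE | F_RISING | F_FALLING | F_STRICT)

static int extts_check(unsigned int flags)
{
	if (flags & ~SUPPORTED)
		return -95; /* -EOPNOTSUPP */
	if ((flags & F_STRICT) && (flags & F_ENABLE) &&
	    (flags & F_EDGES) == F_EDGES)
		return -95;
	return 0;
}

int main(void)
{
	assert(extts_check(F_ENABLE | F_RISING) == 0);
	assert(extts_check(1 << 7) != 0);                        /* unknown flag */
	assert(extts_check(F_ENABLE | F_STRICT | F_EDGES) != 0); /* both edges */
	return 0;
}
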
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index b99d469e4e64..249539247e2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -84,4 +84,9 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
void *key, u32 sz_bytes, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+ return devlink_net(priv_to_devlink(dev));
+}
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c9a091d3226c..31fbfd6e8bb9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1168,7 +1168,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_put_uars_page(dev, dev->priv.uar);
}
-static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
+int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
{
int err = 0;
@@ -1226,7 +1226,7 @@ function_teardown:
return err;
}
-static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
+int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
if (cleanup) {
mlx5_unregister_device(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b100489dc85c..da67b28d6e23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -243,4 +243,7 @@ enum {
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
+
+int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
+int mlx5_load_one(struct mlx5_core_dev *dev, bool boot);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index f641f1336402..03f037811f1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -108,10 +108,10 @@ enable_vfs_hca:
return 0;
}
-static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev, bool clear_vf)
+static void
+mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int num_vfs = pci_num_vf(dev->pdev);
int err;
int vf;
@@ -147,7 +147,7 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
- mlx5_device_disable_sriov(dev, true);
+ mlx5_device_disable_sriov(dev, num_vfs, true);
}
return err;
}
@@ -155,9 +155,10 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
static void mlx5_sriov_disable(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ int num_vfs = pci_num_vf(dev->pdev);
pci_disable_sriov(pdev);
- mlx5_device_disable_sriov(dev, true);
+ mlx5_device_disable_sriov(dev, num_vfs, true);
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
@@ -192,7 +193,7 @@ void mlx5_sriov_detach(struct mlx5_core_dev *dev)
if (!mlx5_core_is_pf(dev))
return;
- mlx5_device_disable_sriov(dev, false);
+ mlx5_device_disable_sriov(dev, pci_num_vf(dev->pdev), false);
}
static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index b74b7d0f6590..004c56c2fc0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -1577,6 +1577,7 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
break;
case DR_ACTION_TYP_MODIFY_HDR:
mlx5dr_icm_free_chunk(action->rewrite.chunk);
+ kfree(action->rewrite.data);
refcount_dec(&action->rewrite.dmn->refcount);
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 5db947df8763..c6548980daf0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -154,7 +154,7 @@ int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
nic_matcher->num_of_builders =
nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv];
- if (!nic_matcher->ste_builder) {
+ if (!nic_matcher->num_of_builders) {
mlx5dr_dbg(matcher->tbl->dmn,
"Rule not supported on this matcher due to IP related fields\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 90c79a133692..8560460d97fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -1097,6 +1097,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
if (htbl)
mlx5dr_htbl_put(htbl);
+ kfree(hw_ste_arr);
+
return 0;
free_ste:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e1a90f5bddd0..e9f791c43f20 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -71,6 +71,7 @@ struct mlxsw_core {
struct list_head trans_list;
spinlock_t trans_list_lock; /* protects trans_list writes */
bool use_emad;
+ bool enable_string_tlv;
} emad;
struct {
u8 *mapping; /* lag_id+port_index to local_port mapping */
@@ -249,6 +250,25 @@ MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
*/
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
+/* emad_string_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x2 (string TLV).
+ */
+MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5);
+
+/* emad_string_tlv_len
+ * Length of the string TLV in u32.
+ */
+MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11);
+
+#define MLXSW_EMAD_STRING_TLV_STRING_LEN 128
+
+/* emad_string_tlv_string
+ * String provided by the device's firmware in case of erroneous register access.
+ */
+MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04,
+ MLXSW_EMAD_STRING_TLV_STRING_LEN);
+
/* emad_reg_tlv_type
* Type of the TLV.
* Must be set to 0x3 (register TLV).
@@ -304,6 +324,12 @@ static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}
+static void mlxsw_emad_pack_string_tlv(char *string_tlv)
+{
+ mlxsw_emad_string_tlv_type_set(string_tlv, MLXSW_EMAD_TLV_TYPE_STRING);
+ mlxsw_emad_string_tlv_len_set(string_tlv, MLXSW_EMAD_STRING_TLV_LEN);
+}
+
static void mlxsw_emad_pack_op_tlv(char *op_tlv,
const struct mlxsw_reg_info *reg,
enum mlxsw_core_reg_access_type type,
@@ -345,7 +371,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
const struct mlxsw_reg_info *reg,
char *payload,
enum mlxsw_core_reg_access_type type,
- u64 tid)
+ u64 tid, bool enable_string_tlv)
{
char *buf;
@@ -355,26 +381,82 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
buf = skb_push(skb, reg->len + sizeof(u32));
mlxsw_emad_pack_reg_tlv(buf, reg, payload);
+ if (enable_string_tlv) {
+ buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_string_tlv(buf);
+ }
+
buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
mlxsw_emad_construct_eth_hdr(skb);
}
+struct mlxsw_emad_tlv_offsets {
+ u16 op_tlv;
+ u16 string_tlv;
+ u16 reg_tlv;
+};
+
+static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv)
+{
+ u8 tlv_type = mlxsw_emad_string_tlv_type_get(tlv);
+
+ return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING;
+}
+
+static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
+{
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN;
+ offsets->string_tlv = 0;
+ offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN +
+ MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+
+ /* If string TLV is present, it must come after the operation TLV. */
+ if (mlxsw_emad_tlv_is_string_tlv(skb->data + offsets->reg_tlv)) {
+ offsets->string_tlv = offsets->reg_tlv;
+ offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
+ }
+}
+
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
- return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ return ((char *) (skb->data + offsets->op_tlv));
+}
+
+static char *mlxsw_emad_string_tlv(const struct sk_buff *skb)
+{
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ if (!offsets->string_tlv)
+ return NULL;
+
+ return ((char *) (skb->data + offsets->string_tlv));
}
static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
- return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
- MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ return ((char *) (skb->data + offsets->reg_tlv));
+}
+
+static char *mlxsw_emad_reg_payload(const char *reg_tlv)
+{
+ return ((char *) (reg_tlv + sizeof(u32)));
}
-static char *mlxsw_emad_reg_payload(const char *op_tlv)
+static char *mlxsw_emad_reg_payload_cmd(const char *mbox)
{
- return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
+ return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}
static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
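
mlxsw_emad_tlv_parse() above records per-TLV offsets in skb->cb: the op TLV sits at a fixed offset, and if the word where the reg TLV would begin carries the string TLV type, the reg offset is pushed past the optional string TLV (33 u32 words). A standalone sketch of that parse over a plain byte buffer, with simplified framing (the type sits in the top bits of one byte here, rather than of a 32-bit word):

/* Offset-based TLV parse with one optional TLV in the middle. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define ETH_HDR_LEN     14
#define OP_TLV_BYTES    16
#define STR_TLV_BYTES   132 /* 33 u32 words */
#define TLV_TYPE_STRING 2

struct tlv_offsets { uint16_t op, string, reg; };

static void tlv_parse(const uint8_t *frame, struct tlv_offsets *off)
{
	off->op = ETH_HDR_LEN;
	off->string = 0;
	off->reg = ETH_HDR_LEN + OP_TLV_BYTES;

	/* If a string TLV is present, it sits where the reg TLV would be. */
	if ((frame[off->reg] >> 3) == TLV_TYPE_STRING) {
		off->string = off->reg;
		off->reg += STR_TLV_BYTES;
	}
}

int main(void)
{
	uint8_t frame[256];
	struct tlv_offsets off;

	memset(frame, 0, sizeof(frame));
	frame[ETH_HDR_LEN + OP_TLV_BYTES] = TLV_TYPE_STRING << 3;
	tlv_parse(frame, &off);
	assert(off.string == ETH_HDR_LEN + OP_TLV_BYTES);
	assert(off.reg == off.string + STR_TLV_BYTES);
	return 0;
}
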
@@ -440,10 +522,31 @@ struct mlxsw_reg_trans {
const struct mlxsw_reg_info *reg;
enum mlxsw_core_reg_access_type type;
int err;
+ char *emad_err_string;
enum mlxsw_emad_op_tlv_status emad_status;
struct rcu_head rcu;
};
+static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
+ struct mlxsw_reg_trans *trans)
+{
+ char *string_tlv;
+ char *string;
+
+ string_tlv = mlxsw_emad_string_tlv(skb);
+ if (!string_tlv)
+ return;
+
+ trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN,
+ GFP_ATOMIC);
+ if (!trans->emad_err_string)
+ return;
+
+ string = mlxsw_emad_string_tlv_string_data(string_tlv);
+ strlcpy(trans->emad_err_string, string,
+ MLXSW_EMAD_STRING_TLV_STRING_LEN);
+}
+
#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
#define MLXSW_EMAD_TIMEOUT_MS 200
@@ -535,12 +638,14 @@ static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
mlxsw_emad_transmit_retry(mlxsw_core, trans);
} else {
if (err == 0) {
- char *op_tlv = mlxsw_emad_op_tlv(skb);
+ char *reg_tlv = mlxsw_emad_reg_tlv(skb);
if (trans->cb)
trans->cb(mlxsw_core,
- mlxsw_emad_reg_payload(op_tlv),
+ mlxsw_emad_reg_payload(reg_tlv),
trans->reg->len, trans->cb_priv);
+ } else {
+ mlxsw_emad_process_string_tlv(skb, trans);
}
mlxsw_emad_trans_finish(trans, err);
}
@@ -556,6 +661,8 @@ static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
skb->data, skb->len);
+ mlxsw_emad_tlv_parse(skb);
+
if (!mlxsw_emad_is_resp(skb))
goto free_skb;
@@ -631,7 +738,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
}
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
- u16 reg_len)
+ u16 reg_len, bool enable_string_tlv)
{
struct sk_buff *skb;
u16 emad_len;
@@ -639,6 +746,8 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
(MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
sizeof(u32) + mlxsw_core->driver->txhdr_len);
+ if (enable_string_tlv)
+ emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
return NULL;
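
mlxsw_emad_alloc() above grows the frame budget by MLXSW_EMAD_STRING_TLV_LEN u32 words when the string TLV is requested, and refuses frames over the maximum. A standalone sketch of that length accounting, with illustrative TLV sizes (the string TLV length of 33 words matches the driver):

/* EMAD frame length accounting with an optional string TLV. */
#include <assert.h>
#include <stdint.h>

#define ETH_HDR_LEN    14
#define OP_TLV_LEN     4   /* u32 words, illustrative */
#define END_TLV_LEN    1
#define STRING_TLV_LEN 33
#define MAX_FRAME_LEN  1518

static int emad_len(uint16_t reg_len, int string_tlv, uint16_t txhdr_len)
{
	int len = reg_len + sizeof(uint32_t) + ETH_HDR_LEN +
		  (OP_TLV_LEN + END_TLV_LEN) * sizeof(uint32_t) + txhdr_len;

	if (string_tlv)
		len += STRING_TLV_LEN * sizeof(uint32_t);
	return len;
}

int main(void)
{
	assert(emad_len(64, 0, 16) <= MAX_FRAME_LEN);
	/* The optional string TLV adds 33 u32 words = 132 bytes. */
	assert(emad_len(64, 1, 16) == emad_len(64, 0, 16) + 132);
	return 0;
}
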
@@ -660,6 +769,7 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
mlxsw_reg_trans_cb_t *cb,
unsigned long cb_priv, u64 tid)
{
+ bool enable_string_tlv;
struct sk_buff *skb;
int err;
@@ -667,7 +777,12 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
tid, reg->id, mlxsw_reg_id_str(reg->id),
mlxsw_core_reg_access_type_str(type));
- skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+ /* Since this can be changed during emad_reg_access, read it once and
+ * use that value throughout.
+ */
+ enable_string_tlv = mlxsw_core->emad.enable_string_tlv;
+
+ skb = mlxsw_emad_alloc(mlxsw_core, reg->len, enable_string_tlv);
if (!skb)
return -ENOMEM;
@@ -684,7 +799,8 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
trans->reg = reg;
trans->type = type;
- mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
+ mlxsw_emad_construct(skb, reg, payload, type, trans->tid,
+ enable_string_tlv);
mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
@@ -1201,6 +1317,9 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (mlxsw_driver->params_register)
devlink_params_publish(devlink);
+ if (!reload)
+ devlink_reload_enable(devlink);
+
return 0;
err_thermal_init:
@@ -1263,6 +1382,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ if (!reload)
+ devlink_reload_disable(devlink);
if (devlink_is_reload_failed(devlink)) {
if (!reload)
/* Only the parts that were not de-initialized in the
@@ -1390,12 +1511,16 @@ static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
struct mlxsw_event_listener_item *event_listener_item = priv;
struct mlxsw_reg_info reg;
char *payload;
- char *op_tlv = mlxsw_emad_op_tlv(skb);
- char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+ char *reg_tlv;
+ char *op_tlv;
+
+ mlxsw_emad_tlv_parse(skb);
+ op_tlv = mlxsw_emad_op_tlv(skb);
+ reg_tlv = mlxsw_emad_reg_tlv(skb);
reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
- payload = mlxsw_emad_reg_payload(op_tlv);
+ payload = mlxsw_emad_reg_payload(reg_tlv);
event_listener_item->el.func(&reg, payload, event_listener_item->priv);
dev_kfree_skb(skb);
}
@@ -1613,8 +1738,11 @@ int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);
+#define MLXSW_REG_TRANS_ERR_STRING_SIZE 256
+
static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
+ char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE];
struct mlxsw_core *mlxsw_core = trans->core;
int err;
@@ -1632,9 +1760,17 @@ static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
mlxsw_core_reg_access_type_str(trans->type),
trans->emad_status,
mlxsw_emad_op_tlv_status_str(trans->emad_status));
+
+ snprintf(err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE,
+ "(tid=%llx,reg_id=%x(%s)) %s (%s)\n", trans->tid,
+ trans->reg->id, mlxsw_reg_id_str(trans->reg->id),
+ mlxsw_emad_op_tlv_status_str(trans->emad_status),
+ trans->emad_err_string ? trans->emad_err_string : "");
+
trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
- trans->emad_status,
- mlxsw_emad_op_tlv_status_str(trans->emad_status));
+ trans->emad_status, err_string);
+
+ kfree(trans->emad_err_string);
}
list_del(&trans->bulk_list);
@@ -1708,7 +1844,7 @@ retry:
}
if (!err)
- memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
+ memcpy(payload, mlxsw_emad_reg_payload_cmd(out_mbox),
reg->len);
mlxsw_cmd_mbox_free(out_mbox);
@@ -2205,6 +2341,12 @@ u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_read_frc_l);
+void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_core->emad.enable_string_tlv = true;
+}
+EXPORT_SYMBOL(mlxsw_core_emad_string_tlv_enable);
+
static int __init mlxsw_core_module_init(void)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 0d18bee6d140..543476a2e503 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -347,6 +347,8 @@ void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core);
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core);
+void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core);
+
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
index a33b896f4bb8..acfbbec52424 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/emad.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -19,10 +19,8 @@
enum {
MLXSW_EMAD_TLV_TYPE_END,
MLXSW_EMAD_TLV_TYPE_OP,
- MLXSW_EMAD_TLV_TYPE_DR,
+ MLXSW_EMAD_TLV_TYPE_STRING,
MLXSW_EMAD_TLV_TYPE_REG,
- MLXSW_EMAD_TLV_TYPE_USERDATA,
- MLXSW_EMAD_TLV_TYPE_OOBETH,
};
/* OP TLV */
@@ -89,6 +87,9 @@ enum {
MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5,
};
+/* STRING TLV */
+#define MLXSW_EMAD_STRING_TLV_LEN 33 /* Length in u32 */
+
/* END TLV */
#define MLXSW_EMAD_END_TLV_LEN 1 /* Length in u32 */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index bec035ee5349..5294a1622643 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5480,6 +5480,7 @@ enum mlxsw_reg_htgt_trap_group {
enum mlxsw_reg_htgt_discard_trap_group {
MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS,
};
/* reg_htgt_trap_group
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index ea4cc2aa99e0..556dca328bb5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4079,8 +4079,10 @@ static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
sizeof(port_mapping),
GFP_KERNEL);
- if (!mlxsw_sp->port_mapping[i])
+ if (!mlxsw_sp->port_mapping[i]) {
+ err = -ENOMEM;
goto err_port_module_info_dup;
+ }
}
return 0;
@@ -4510,8 +4512,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
false),
/* L3 traps */
- MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
@@ -4538,8 +4538,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
false),
- MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
- MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
@@ -4554,7 +4552,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
/* Multicast Router Traps */
MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
- MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
/* NVE traps */
@@ -4897,6 +4894,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
if (err)
return err;
+ mlxsw_core_emad_string_tlv_enable(mlxsw_core);
+
err = mlxsw_sp_base_mac_get(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 0e99b64450ca..517cb8b14b1d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -77,6 +77,8 @@ struct mlxsw_sp_router {
struct notifier_block inet6addr_nb;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
+ u32 adj_discard_index;
+ bool adj_discard_index_valid;
};
struct mlxsw_sp_rif {
@@ -367,6 +369,7 @@ enum mlxsw_sp_fib_entry_type {
MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
+ MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
/* This is a special case of local delivery, where a packet should be
* decapsulated on reception. Note that there is no corresponding ENCAP,
@@ -4196,15 +4199,51 @@ mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
}
}
+static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
+{
+ u32 adj_discard_index = mlxsw_sp->router->adj_discard_index;
+ enum mlxsw_reg_ratr_trap_action trap_action;
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+ int err;
+
+ if (mlxsw_sp->router->adj_discard_index_valid)
+ return 0;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ &mlxsw_sp->router->adj_discard_index);
+ if (err)
+ return err;
+
+ trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS;
+ mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
+ MLXSW_REG_RATR_TYPE_ETHERNET, adj_discard_index,
+ rif_index);
+ mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+ if (err)
+ goto err_ratr_write;
+
+ mlxsw_sp->router->adj_discard_index_valid = true;
+
+ return 0;
+
+err_ratr_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ mlxsw_sp->router->adj_discard_index);
+ return err;
+}
+
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
+ struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
char ralue_pl[MLXSW_REG_RALUE_LEN];
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
u16 ecmp_size = 0;
+ int err;
/* In case the nexthop group adjacency index is valid, use it
* with provided ECMP size. Otherwise, setup trap and pass
@@ -4214,6 +4253,15 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
adjacency_index = fib_entry->nh_group->adj_index;
ecmp_size = fib_entry->nh_group->ecmp_size;
+ } else if (!nh_group->adj_index_valid && nh_group->count &&
+ nh_group->nh_rif) {
+ err = mlxsw_sp_adj_discard_write(mlxsw_sp,
+ nh_group->nh_rif->rif_index);
+ if (err)
+ return err;
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
+ adjacency_index = mlxsw_sp->router->adj_discard_index;
+ ecmp_size = 1;
} else {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
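
mlxsw_sp_adj_discard_write() above is a lazy one-shot setup: return early when the discard adjacency entry is already valid, allocate it, roll the allocation back if programming fails, and set the validity flag only on success. A standalone sketch of that pattern, with alloc_entry()/write_entry()/free_entry() as hypothetical stand-ins for the KVDL and RATR calls:

/* Lazy one-shot init: allocate on first use, roll back on a failed
 * write, mark validity so later calls are no-ops.
 */
#include <assert.h>
#include <stdbool.h>

static bool valid;
static unsigned int entry_index;

static int alloc_entry(unsigned int *idx) { *idx = 42; return 0; }
static void free_entry(unsigned int idx)  { (void)idx; }
static int write_entry(unsigned int idx)  { (void)idx; return 0; }

static int discard_entry_ensure(void)
{
	int err;

	if (valid) /* already programmed, nothing to do */
		return 0;

	err = alloc_entry(&entry_index);
	if (err)
		return err;

	err = write_entry(entry_index);
	if (err) {
		free_entry(entry_index); /* roll back the allocation */
		return err;
	}

	valid = true;
	return 0;
}

int main(void)
{
	assert(discard_entry_ensure() == 0);
	assert(discard_entry_ensure() == 0); /* idempotent second call */
	return 0;
}
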
@@ -4274,6 +4322,23 @@ static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
}
static int
+mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ u16 trap_id;
+
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
+ trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
+
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
@@ -4314,6 +4379,9 @@ static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
+ return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
+ op);
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
fib_entry, op);
@@ -4391,7 +4459,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
* can do so with a lower priority than packets directed
* at the host, so use action type local instead of trap.
*/
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
return 0;
case RTN_UNICAST:
if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
@@ -5351,7 +5419,7 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
else if (rt->fib6_type == RTN_BLACKHOLE)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
else if (rt->fib6_flags & RTF_REJECT)
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
else
@@ -5909,6 +5977,16 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
continue;
mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
}
+
+ /* After flushing all the routes, no one can still be using the
+ * adjacency index that discards packets, so free it in case it was
+ * allocated.
+ */
+ if (!mlxsw_sp->router->adj_discard_index_valid)
+ return;
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ mlxsw_sp->router->adj_discard_index);
+ mlxsw_sp->router->adj_discard_index_valid = false;
}
static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 7c03b661ae7e..e0d7c49ffae0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -13,16 +13,27 @@
static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
void *priv);
+static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx);
#define MLXSW_SP_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
MLXSW_SP_TRAP_METADATA)
+#define MLXSW_SP_TRAP_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ MLXSW_SP_TRAP_METADATA)
+
#define MLXSW_SP_RXL_DISCARD(_id, _group_id) \
MLXSW_RXL(mlxsw_sp_rx_drop_listener, DISCARD_##_id, SET_FW_DEFAULT, \
false, SP_##_group_id, DISCARD)
+#define MLXSW_SP_RXL_EXCEPTION(_id, _group_id, _action) \
+ MLXSW_RXL(mlxsw_sp_rx_exception_listener, _id, \
+ _action, false, SP_##_group_id, DISCARD)
+
static struct devlink_trap mlxsw_sp_traps_arr[] = {
MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS),
MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
@@ -30,6 +41,23 @@ static struct devlink_trap mlxsw_sp_traps_arr[] = {
MLXSW_SP_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
MLXSW_SP_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
MLXSW_SP_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(NON_IP_PACKET, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(UC_DIP_MC_DMAC, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(DIP_LB, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(SIP_MC, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(SIP_LB, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(CORRUPTED_IP_HDR, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(IPV4_SIP_BC, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_RESERVED_SCOPE, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(MTU_ERROR, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(TTL_ERROR, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(RPF, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(REJECT_ROUTE, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(UNRESOLVED_NEIGH, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(IPV4_LPM_UNICAST_MISS, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(IPV6_LPM_UNICAST_MISS, L3_DROPS),
};
static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
@@ -40,6 +68,28 @@ static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_UC, L2_DISCARDS),
MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_MC_NULL, L2_DISCARDS),
MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_LB, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ROUTER2, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_NON_IP_PACKET, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_UC_DIP_MC_DMAC, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_DIP_LB, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_MC, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_LB, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_CORRUPTED_IP_HDR, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_IPV4_SIP_BC, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_RESERVED_SCOPE, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, L3_DISCARDS),
+ MLXSW_SP_RXL_EXCEPTION(MTUERROR, ROUTER_EXP, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(TTLERROR, ROUTER_EXP, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(RPF, RPF, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(RTR_INGRESS1, REMOTE_ROUTE, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV4, HOST_MISS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, HOST_MISS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, REMOTE_ROUTE,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM4, ROUTER_EXP,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6, ROUTER_EXP,
+ TRAP_EXCEPTION_TO_CPU),
};
/* Mapping between hardware trap and devlink trap. Multiple hardware traps can
@@ -54,6 +104,25 @@ static u16 mlxsw_sp_listener_devlink_map[] = {
DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET,
+ DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC,
+ DEVLINK_TRAP_GENERIC_ID_DIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_SIP_MC,
+ DEVLINK_TRAP_GENERIC_ID_SIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_MTU_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_TTL_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_RPF,
+ DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
+ DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
+ DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS,
};
static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
@@ -104,6 +173,30 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
consume_skb(skb);
}
+static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ if (mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port))
+ return;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
+ skb_push(skb, ETH_HLEN);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
+ skb_pull(skb, ETH_HLEN);
+ skb->offload_fwd_mark = 1;
+ netif_receive_skb(skb);
+}
+
int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
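
The exception listener above differs from the drop listener in that the packet survives: the L2 header is pushed back so the devlink report sees the full frame, then pulled off again before the skb continues up the stack via netif_receive_skb(). A standalone sketch of that push/report/pull/receive flow, with report()/receive() as hypothetical stand-ins for the devlink and netif calls:

/* Temporarily re-expose the Ethernet header for the report, then
 * restore the payload view for the normal receive path.
 */
#include <stdio.h>

#define ETH_HLEN 14

struct pkt { unsigned char *data; unsigned int len; };

static void push(struct pkt *p, unsigned int n) { p->data -= n; p->len += n; }
static void pull(struct pkt *p, unsigned int n) { p->data += n; p->len -= n; }

static void report(const struct pkt *p)  { printf("report %u bytes\n", p->len); }
static void receive(const struct pkt *p) { printf("receive %u bytes\n", p->len); }

int main(void)
{
	unsigned char frame[64] = {0};
	struct pkt p = { frame + ETH_HLEN, sizeof(frame) - ETH_HLEN };

	push(&p, ETH_HLEN);  /* include the L2 header in the report */
	report(&p);
	pull(&p, ETH_HLEN);  /* back to the payload for the normal path */
	receive(&p);
	return 0;
}
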
@@ -211,6 +304,7 @@ mlxsw_sp_trap_group_policer_init(struct mlxsw_sp *mlxsw_sp,
u32 rate;
switch (group->id) {
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS: /* fall through */
case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
policer_id = MLXSW_SP_DISCARD_POLICER_ID;
ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
@@ -242,6 +336,12 @@ __mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp,
priority = 0;
tc = 1;
break;
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS:
+ group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS;
+ policer_id = MLXSW_SP_DISCARD_POLICER_ID;
+ priority = 0;
+ tc = 1;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 7618f084cae9..0c1c142bb6b0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -49,6 +49,7 @@ enum {
MLXSW_TRAP_ID_IPV6_DHCP = 0x69,
MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F,
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
+ MLXSW_TRAP_ID_RTR_INGRESS1 = 0x71,
MLXSW_TRAP_ID_IPV6_PIM = 0x79,
MLXSW_TRAP_ID_IPV6_VRRP = 0x7A,
MLXSW_TRAP_ID_IPV4_BGP = 0x88,
@@ -66,6 +67,8 @@ enum {
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
+ MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
+ MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
MLXSW_TRAP_ID_DISCARD_ING_PACKET_SMAC_MC = 0x140,
MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VTAG_ALLOW = 0x148,
MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VLAN = 0x149,
@@ -73,6 +76,18 @@ enum {
MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_UC = 0x150,
MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_MC_NULL = 0x151,
MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_LB = 0x152,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_NON_IP_PACKET = 0x160,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_UC_DIP_MC_DMAC = 0x161,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LB = 0x162,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_MC = 0x163,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LB = 0x165,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_CORRUPTED_IP_HDR = 0x167,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_LPM6 = 0x17C,
+ MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_RESERVED_SCOPE = 0x1B0,
+ MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE = 0x1B1,
MLXSW_TRAP_ID_ACL0 = 0x1C0,
/* Multicast trap used for routes with trap action */
MLXSW_TRAP_ID_ACL1 = 0x1C1,
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 57b26c2acf87..afe52463dc57 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -11,7 +11,9 @@
#include "lan743x_ptp.h"
-#define LAN743X_NUMBER_OF_GPIO (12)
+#define LAN743X_LED0_ENABLE 20 /* LED0 offset in HW_CFG */
+#define LAN743X_LED_ENABLE(pin) BIT(LAN743X_LED0_ENABLE + (pin))
+
#define LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB (31249999)
#define LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM (2047999934)
@@ -139,19 +141,20 @@ done:
spin_unlock_bh(&ptp->tx_ts_lock);
}
-static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter)
+static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter,
+ int event_channel)
{
struct lan743x_ptp *ptp = &adapter->ptp;
int result = -ENODEV;
- int index = 0;
mutex_lock(&ptp->command_lock);
- for (index = 0; index < LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS; index++) {
- if (!(test_bit(index, &ptp->used_event_ch))) {
- ptp->used_event_ch |= BIT(index);
- result = index;
- break;
- }
+ if (!(test_bit(event_channel, &ptp->used_event_ch))) {
+ ptp->used_event_ch |= BIT(event_channel);
+ result = event_channel;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "attempted to reserved a used event_channel = %d\n",
+ event_channel);
}
mutex_unlock(&ptp->command_lock);
return result;
@@ -179,12 +182,62 @@ static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
s64 time_step_ns);
+static void lan743x_led_mux_enable(struct lan743x_adapter *adapter,
+ int pin, bool enable)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ if (ptp->leds_multiplexed &&
+ ptp->led_enabled[pin]) {
+ u32 val = lan743x_csr_read(adapter, HW_CFG);
+
+ if (enable)
+ val |= LAN743X_LED_ENABLE(pin);
+ else
+ val &= ~LAN743X_LED_ENABLE(pin);
+
+ lan743x_csr_write(adapter, HW_CFG, val);
+ }
+}
+
+static void lan743x_led_mux_save(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;
+
+ if (id_rev == ID_REV_ID_LAN7430_) {
+ int i;
+ u32 val = lan743x_csr_read(adapter, HW_CFG);
+
+ for (i = 0; i < LAN7430_N_LED; i++) {
+ bool led_enabled = (val & LAN743X_LED_ENABLE(i)) != 0;
+
+ ptp->led_enabled[i] = led_enabled;
+ }
+ ptp->leds_multiplexed = true;
+ } else {
+ ptp->leds_multiplexed = false;
+ }
+}
+
+static void lan743x_led_mux_restore(struct lan743x_adapter *adapter)
+{
+ u32 id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;
+
+ if (id_rev == ID_REV_ID_LAN7430_) {
+ int i;
+
+ for (i = 0; i < LAN7430_N_LED; i++)
+ lan743x_led_mux_enable(adapter, i, true);
+ }
+}
+
static int lan743x_gpio_rsrv_ptp_out(struct lan743x_adapter *adapter,
- int bit, int ptp_channel)
+ int pin, int event_channel)
{
struct lan743x_gpio *gpio = &adapter->gpio;
unsigned long irq_flags = 0;
- int bit_mask = BIT(bit);
+ int bit_mask = BIT(pin);
int ret = -EBUSY;
spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
@@ -194,41 +247,44 @@ static int lan743x_gpio_rsrv_ptp_out(struct lan743x_adapter *adapter,
gpio->output_bits |= bit_mask;
gpio->ptp_bits |= bit_mask;
+ /* assign pin to GPIO function */
+ lan743x_led_mux_enable(adapter, pin, false);
+
/* set as output, and zero initial value */
- gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(bit);
- gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit);
+ gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(pin);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
/* enable gpio, and set buffer type to push pull */
- gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOEN_BIT_(bit);
- gpio->gpio_cfg1 |= GPIO_CFG1_GPIOBUF_BIT_(bit);
+ gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOEN_BIT_(pin);
+ gpio->gpio_cfg1 |= GPIO_CFG1_GPIOBUF_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
/* set 1588 polarity to high */
- gpio->gpio_cfg2 |= GPIO_CFG2_1588_POL_BIT_(bit);
+ gpio->gpio_cfg2 |= GPIO_CFG2_1588_POL_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2);
- if (!ptp_channel) {
+ if (event_channel == 0) {
/* use channel A */
- gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_CH_SEL_BIT_(bit);
+ gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_CH_SEL_BIT_(pin);
} else {
/* use channel B */
- gpio->gpio_cfg3 |= GPIO_CFG3_1588_CH_SEL_BIT_(bit);
+ gpio->gpio_cfg3 |= GPIO_CFG3_1588_CH_SEL_BIT_(pin);
}
- gpio->gpio_cfg3 |= GPIO_CFG3_1588_OE_BIT_(bit);
+ gpio->gpio_cfg3 |= GPIO_CFG3_1588_OE_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3);
- ret = bit;
+ ret = pin;
}
spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
return ret;
}
-static void lan743x_gpio_release(struct lan743x_adapter *adapter, int bit)
+static void lan743x_gpio_release(struct lan743x_adapter *adapter, int pin)
{
struct lan743x_gpio *gpio = &adapter->gpio;
unsigned long irq_flags = 0;
- int bit_mask = BIT(bit);
+ int bit_mask = BIT(pin);
spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
if (gpio->used_bits & bit_mask) {
@@ -239,21 +295,24 @@ static void lan743x_gpio_release(struct lan743x_adapter *adapter, int bit)
if (gpio->ptp_bits & bit_mask) {
gpio->ptp_bits &= ~bit_mask;
/* disable ptp output */
- gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_OE_BIT_(bit);
+ gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_OE_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG3,
gpio->gpio_cfg3);
}
/* release gpio output */
/* disable gpio */
- gpio->gpio_cfg1 |= GPIO_CFG1_GPIOEN_BIT_(bit);
- gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOBUF_BIT_(bit);
+ gpio->gpio_cfg1 |= GPIO_CFG1_GPIOEN_BIT_(pin);
+ gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOBUF_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
/* reset back to input */
- gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DIR_BIT_(bit);
- gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DIR_BIT_(pin);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+
+ /* assign pin to original function */
+ lan743x_led_mux_enable(adapter, pin, true);
}
}
spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
@@ -391,89 +450,95 @@ static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci,
return 0;
}
-static void lan743x_ptp_perout_off(struct lan743x_adapter *adapter)
+static void lan743x_ptp_perout_off(struct lan743x_adapter *adapter,
+ unsigned int index)
{
struct lan743x_ptp *ptp = &adapter->ptp;
u32 general_config = 0;
+ struct lan743x_ptp_perout *perout = &ptp->perout[index];
- if (ptp->perout_gpio_bit >= 0) {
- lan743x_gpio_release(adapter, ptp->perout_gpio_bit);
- ptp->perout_gpio_bit = -1;
+ if (perout->gpio_pin >= 0) {
+ lan743x_gpio_release(adapter, perout->gpio_pin);
+ perout->gpio_pin = -1;
}
- if (ptp->perout_event_ch >= 0) {
+ if (perout->event_ch >= 0) {
/* set target to far in the future, effectively disabling it */
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_SEC_X(perout->event_ch),
0xFFFF0000);
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_NS_X(perout->event_ch),
0);
general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG);
general_config |= PTP_GENERAL_CONFIG_RELOAD_ADD_X_
- (ptp->perout_event_ch);
+ (perout->event_ch);
lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config);
- lan743x_ptp_release_event_ch(adapter, ptp->perout_event_ch);
- ptp->perout_event_ch = -1;
+ lan743x_ptp_release_event_ch(adapter, perout->event_ch);
+ perout->event_ch = -1;
}
}
static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
- struct ptp_perout_request *perout)
+ struct ptp_perout_request *perout_request)
{
struct lan743x_ptp *ptp = &adapter->ptp;
u32 period_sec = 0, period_nsec = 0;
u32 start_sec = 0, start_nsec = 0;
u32 general_config = 0;
int pulse_width = 0;
- int perout_bit = 0;
-
- if (!on) {
- lan743x_ptp_perout_off(adapter);
+ int perout_pin = 0;
+ unsigned int index = perout_request->index;
+ struct lan743x_ptp_perout *perout = &ptp->perout[index];
+
+ /* Reject requests with unsupported flags */
+ if (perout_request->flags)
+ return -EOPNOTSUPP;
+
+ if (on) {
+ perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
+ perout_request->index);
+ if (perout_pin < 0)
+ return -EBUSY;
+ } else {
+ lan743x_ptp_perout_off(adapter, index);
return 0;
}
- if (ptp->perout_event_ch >= 0 ||
- ptp->perout_gpio_bit >= 0) {
+ if (perout->event_ch >= 0 ||
+ perout->gpio_pin >= 0) {
/* already on, turn off first */
- lan743x_ptp_perout_off(adapter);
+ lan743x_ptp_perout_off(adapter, index);
}
- ptp->perout_event_ch = lan743x_ptp_reserve_event_ch(adapter);
- if (ptp->perout_event_ch < 0) {
+ perout->event_ch = lan743x_ptp_reserve_event_ch(adapter, index);
+
+ if (perout->event_ch < 0) {
netif_warn(adapter, drv, adapter->netdev,
- "Failed to reserve event channel for PEROUT\n");
+ "Failed to reserve event channel %d for PEROUT\n",
+ index);
goto failed;
}
- switch (adapter->csr.id_rev & ID_REV_ID_MASK_) {
- case ID_REV_ID_LAN7430_:
- perout_bit = 2;/* GPIO 2 is preferred on EVB LAN7430 */
- break;
- case ID_REV_ID_LAN7431_:
- perout_bit = 4;/* GPIO 4 is preferred on EVB LAN7431 */
- break;
- }
+ perout->gpio_pin = lan743x_gpio_rsrv_ptp_out(adapter,
+ perout_pin,
+ perout->event_ch);
- ptp->perout_gpio_bit = lan743x_gpio_rsrv_ptp_out(adapter,
- perout_bit,
- ptp->perout_event_ch);
-
- if (ptp->perout_gpio_bit < 0) {
+ if (perout->gpio_pin < 0) {
netif_warn(adapter, drv, adapter->netdev,
"Failed to reserve gpio %d for PEROUT\n",
- perout_bit);
+ perout_pin);
goto failed;
}
- start_sec = perout->start.sec;
- start_sec += perout->start.nsec / 1000000000;
- start_nsec = perout->start.nsec % 1000000000;
+ start_sec = perout_request->start.sec;
+ start_sec += perout_request->start.nsec / 1000000000;
+ start_nsec = perout_request->start.nsec % 1000000000;
- period_sec = perout->period.sec;
- period_sec += perout->period.nsec / 1000000000;
- period_nsec = perout->period.nsec % 1000000000;
+ period_sec = perout_request->period.sec;
+ period_sec += perout_request->period.nsec / 1000000000;
+ period_nsec = perout_request->period.nsec % 1000000000;
if (period_sec == 0) {
if (period_nsec >= 400000000) {
@@ -499,41 +564,41 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
/* turn off by setting target far in future */
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_SEC_X(perout->event_ch),
0xFFFF0000);
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch), 0);
+ PTP_CLOCK_TARGET_NS_X(perout->event_ch), 0);
/* Configure to pulse every period */
general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG);
general_config &= ~(PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_
- (ptp->perout_event_ch));
+ (perout->event_ch));
general_config |= PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_
- (ptp->perout_event_ch, pulse_width);
+ (perout->event_ch, pulse_width);
general_config &= ~PTP_GENERAL_CONFIG_RELOAD_ADD_X_
- (ptp->perout_event_ch);
+ (perout->event_ch);
lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config);
/* set the reload to one toggle cycle */
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_RELOAD_SEC_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_RELOAD_SEC_X(perout->event_ch),
period_sec);
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_RELOAD_NS_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_RELOAD_NS_X(perout->event_ch),
period_nsec);
/* set the start time */
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_SEC_X(perout->event_ch),
start_sec);
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_NS_X(perout->event_ch),
start_nsec);
return 0;
failed:
- lan743x_ptp_perout_off(adapter);
+ lan743x_ptp_perout_off(adapter, index);
return -ENODEV;
}
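
The start/period arithmetic in lan743x_ptp_perout just folds whole seconds out of the nanosecond field so each register write stays within [0, 1e9). The same normalization in isolation (plain C, no driver types):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000u

/* Split a (sec, nsec) pair so that nsec < NSEC_PER_SEC, matching
 * what the driver does before programming the clock target.
 */
static void normalize(uint32_t *sec, uint32_t *nsec)
{
	*sec += *nsec / NSEC_PER_SEC;
	*nsec %= NSEC_PER_SEC;
}

int main(void)
{
	uint32_t sec = 5, nsec = 2500000000u;	/* 2.5 s expressed in ns */

	normalize(&sec, &nsec);
	printf("%u s + %u ns\n", sec, nsec);	/* 7 s + 500000000 ns */
	return 0;
}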
@@ -550,7 +615,7 @@ static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
case PTP_CLK_REQ_EXTTS:
return -EINVAL;
case PTP_CLK_REQ_PEROUT:
- if (request->perout.index == 0)
+ if (request->perout.index < ptpci->n_per_out)
return lan743x_ptp_perout(adapter, on,
&request->perout);
return -EINVAL;
@@ -568,6 +633,29 @@ static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
return 0;
}
+static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
+ unsigned int pin,
+ enum ptp_pin_function func,
+ unsigned int chan)
+{
+ int result = 0;
+
+ /* Confirm the requested function is supported. Parameter
+ * validation is done by the caller.
+ */
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_EXTTS:
+ case PTP_PF_PHYSYNC:
+ default:
+ result = -1;
+ break;
+ }
+ return result;
+}
+
static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
{
struct lan743x_ptp *ptp =
@@ -861,12 +949,19 @@ void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
int lan743x_ptp_init(struct lan743x_adapter *adapter)
{
struct lan743x_ptp *ptp = &adapter->ptp;
+ int i;
mutex_init(&ptp->command_lock);
spin_lock_init(&ptp->tx_ts_lock);
ptp->used_event_ch = 0;
- ptp->perout_event_ch = -1;
- ptp->perout_gpio_bit = -1;
+
+ for (i = 0; i < LAN743X_PTP_N_EVENT_CHAN; i++) {
+ ptp->perout[i].event_ch = -1;
+ ptp->perout[i].gpio_pin = -1;
+ }
+
+ lan743x_led_mux_save(adapter);
+
return 0;
}
@@ -875,6 +970,8 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
struct lan743x_ptp *ptp = &adapter->ptp;
int ret = -ENODEV;
u32 temp;
+ int i;
+ int n_pins;
lan743x_ptp_reset(adapter);
lan743x_ptp_sync_to_system_clock(adapter);
@@ -890,10 +987,32 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK))
return 0;
- snprintf(ptp->pin_config[0].name, 32, "lan743x_ptp_pin_0");
- ptp->pin_config[0].index = 0;
- ptp->pin_config[0].func = PTP_PF_PEROUT;
- ptp->pin_config[0].chan = 0;
+ switch (adapter->csr.id_rev & ID_REV_ID_MASK_) {
+ case ID_REV_ID_LAN7430_:
+ n_pins = LAN7430_N_GPIO;
+ break;
+ case ID_REV_ID_LAN7431_:
+ n_pins = LAN7431_N_GPIO;
+ break;
+ default:
+ netif_warn(adapter, drv, adapter->netdev,
+ "Unknown LAN743x (%08x). Assuming no GPIO\n",
+ adapter->csr.id_rev);
+ n_pins = 0;
+ break;
+ }
+
+ if (n_pins > LAN743X_PTP_N_GPIO)
+ n_pins = LAN743X_PTP_N_GPIO;
+
+ for (i = 0; i < n_pins; i++) {
+ struct ptp_pin_desc *ptp_pin = &ptp->pin_config[i];
+
+ snprintf(ptp_pin->name,
+ sizeof(ptp_pin->name), "lan743x_ptp_pin_%02d", i);
+ ptp_pin->index = i;
+ ptp_pin->func = PTP_PF_NONE;
+ }
ptp->ptp_clock_info.owner = THIS_MODULE;
snprintf(ptp->ptp_clock_info.name, 16, "%pm",
@@ -901,10 +1020,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
ptp->ptp_clock_info.max_adj = LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB;
ptp->ptp_clock_info.n_alarm = 0;
ptp->ptp_clock_info.n_ext_ts = 0;
- ptp->ptp_clock_info.n_per_out = 1;
- ptp->ptp_clock_info.n_pins = 0;
+ ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN;
+ ptp->ptp_clock_info.n_pins = n_pins;
ptp->ptp_clock_info.pps = 0;
- ptp->ptp_clock_info.pin_config = NULL;
+ ptp->ptp_clock_info.pin_config = ptp->pin_config;
ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
ptp->ptp_clock_info.adjfreq = lan743x_ptpci_adjfreq;
ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime;
@@ -913,7 +1032,7 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
ptp->ptp_clock_info.settime64 = lan743x_ptpci_settime64;
ptp->ptp_clock_info.enable = lan743x_ptpci_enable;
ptp->ptp_clock_info.do_aux_work = lan743x_ptpci_do_aux_work;
- ptp->ptp_clock_info.verify = NULL;
+ ptp->ptp_clock_info.verify = lan743x_ptpci_verify_pin_config;
ptp->ptp_clock = ptp_clock_register(&ptp->ptp_clock_info,
&adapter->pdev->dev);
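
With pin_config exported and verify wired up, a periodic output can be requested from userspace through the standard PTP character device, much as the kernel's testptp tool does. A sketch, assuming the clock enumerates as /dev/ptp0 and that pin index 4 should carry channel 0 (error handling trimmed):

#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_pin_desc pin;
	struct ptp_perout_request req;
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	/* Route periodic-output channel 0 onto pin index 4 */
	memset(&pin, 0, sizeof(pin));
	pin.index = 4;
	pin.func = PTP_PF_PEROUT;
	pin.chan = 0;
	if (ioctl(fd, PTP_PIN_SETFUNC, &pin))
		perror("PTP_PIN_SETFUNC");

	/* Ask for a 1 Hz signal starting at t = 0; flags stay zero,
	 * which matters because the driver rejects any set flag.
	 */
	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.period.sec = 1;
	if (ioctl(fd, PTP_PEROUT_REQUEST, &req))
		perror("PTP_PEROUT_REQUEST");

	close(fd);
	return 0;
}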
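With PTP_PF_NONE as the initial function for every pin, userspace must opt in explicitly before a GPIO is repurposed away from its LED role, which is why the mux save/restore hooks bracket the open/close path.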
@@ -939,7 +1058,7 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter)
int index;
if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
- ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED) {
+ (ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED)) {
ptp_clock_unregister(ptp->ptp_clock);
ptp->ptp_clock = NULL;
ptp->flags &= ~PTP_FLAG_PTP_CLOCK_REGISTERED;
@@ -973,6 +1092,8 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter)
ptp->pending_tx_timestamps = 0;
spin_unlock_bh(&ptp->tx_ts_lock);
+ lan743x_led_mux_restore(adapter);
+
lan743x_ptp_disable(adapter);
}
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
index 5fc1b3cd5e33..7663bf5d2e33 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -7,6 +7,18 @@
#include "linux/ptp_clock_kernel.h"
#include "linux/netdevice.h"
+#define LAN7430_N_LED 4
+#define LAN7430_N_GPIO 4 /* multiplexed with PHY LEDs */
+#define LAN7431_N_GPIO 12
+
+#define LAN743X_PTP_N_GPIO LAN7431_N_GPIO
+
+/* the number of periodic outputs is limited by the number of
+ * PTP clock event channels
+ */
+#define LAN743X_PTP_N_EVENT_CHAN 2
+#define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN
+
struct lan743x_adapter;
/* GPIO */
@@ -40,9 +52,14 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
#define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4)
-#define PTP_FLAG_PTP_CLOCK_REGISTERED BIT(1)
+#define PTP_FLAG_PTP_CLOCK_REGISTERED BIT(1)
#define PTP_FLAG_ISR_ENABLED BIT(2)
+struct lan743x_ptp_perout {
+ int event_ch; /* PTP event channel (0=channel A, 1=channel B) */
+ int gpio_pin; /* GPIO pin where output appears */
+};
+
struct lan743x_ptp {
int flags;
@@ -51,13 +68,13 @@ struct lan743x_ptp {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
- struct ptp_pin_desc pin_config[1];
+ struct ptp_pin_desc pin_config[LAN743X_PTP_N_GPIO];
-#define LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS (2)
unsigned long used_event_ch;
+ struct lan743x_ptp_perout perout[LAN743X_PTP_N_PEROUT];
- int perout_event_ch;
- int perout_gpio_bit;
+ bool leds_multiplexed;
+ bool led_enabled[LAN7430_N_LED];
/* tx_ts_lock: used to prevent concurrent access to timestamp arrays */
spinlock_t tx_ts_lock;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 344539c0d3aa..90c46ba763d7 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -132,11 +132,11 @@ static void ocelot_mact_init(struct ocelot *ocelot)
ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
}
-static void ocelot_vcap_enable(struct ocelot *ocelot, struct ocelot_port *port)
+static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
{
ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa),
- ANA_PORT_VCAP_S2_CFG, port->chip_port);
+ ANA_PORT_VCAP_S2_CFG, port);
}
static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
@@ -169,117 +169,178 @@ static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
return ocelot_vlant_wait_for_completion(ocelot);
}
-static void ocelot_vlan_mode(struct ocelot_port *port,
+static void ocelot_vlan_mode(struct ocelot *ocelot, int port,
netdev_features_t features)
{
- struct ocelot *ocelot = port->ocelot;
- u8 p = port->chip_port;
u32 val;
/* Filtering */
val = ocelot_read(ocelot, ANA_VLANMASK);
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
- val |= BIT(p);
+ val |= BIT(port);
else
- val &= ~BIT(p);
+ val &= ~BIT(port);
ocelot_write(ocelot, val, ANA_VLANMASK);
}
-static void ocelot_vlan_port_apply(struct ocelot *ocelot,
- struct ocelot_port *port)
+void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
+ bool vlan_aware)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 val;
- /* Ingress clasification (ANA_PORT_VLAN_CFG) */
- /* Default vlan to clasify for untagged frames (may be zero) */
- val = ANA_PORT_VLAN_CFG_VLAN_VID(port->pvid);
- if (port->vlan_aware)
- val |= ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
- ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
-
+ if (vlan_aware)
+ val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
+ else
+ val = 0;
ocelot_rmw_gix(ocelot, val,
- ANA_PORT_VLAN_CFG_VLAN_VID_M |
ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
- ANA_PORT_VLAN_CFG, port->chip_port);
+ ANA_PORT_VLAN_CFG, port);
- /* Drop frames with multicast source address */
- val = ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA;
- if (port->vlan_aware && !port->vid)
+ if (vlan_aware && !ocelot_port->vid)
/* If port is vlan-aware and tagged, drop untagged and priority
* tagged frames.
*/
- val |= ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+ val = ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+ else
+ val = 0;
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
- ocelot_write_gix(ocelot, val, ANA_PORT_DROP_CFG, port->chip_port);
-
- /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q. */
- val = REW_TAG_CFG_TAG_TPID_CFG(0);
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
+ ANA_PORT_DROP_CFG, port);
- if (port->vlan_aware) {
- if (port->vid)
+ if (vlan_aware) {
+ if (ocelot_port->vid)
/* Tag all frames except when VID == DEFAULT_VLAN */
val |= REW_TAG_CFG_TAG_CFG(1);
else
/* Tag all frames */
val |= REW_TAG_CFG_TAG_CFG(3);
+ } else {
+ /* Port tagging disabled. */
+ val = REW_TAG_CFG_TAG_CFG(0);
}
ocelot_rmw_gix(ocelot, val,
- REW_TAG_CFG_TAG_TPID_CFG_M |
REW_TAG_CFG_TAG_CFG_M,
- REW_TAG_CFG, port->chip_port);
+ REW_TAG_CFG, port);
+}
+EXPORT_SYMBOL(ocelot_port_vlan_filtering);
- /* Set default VLAN and tag type to 8021Q. */
- val = REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q) |
- REW_PORT_VLAN_CFG_PORT_VID(port->vid);
- ocelot_rmw_gix(ocelot, val,
- REW_PORT_VLAN_CFG_PORT_TPID_M |
+static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
+ u16 vid)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (ocelot_port->vid != vid) {
+ /* Always permit deleting the native VLAN (vid = 0) */
+ if (ocelot_port->vid && vid) {
+ dev_err(ocelot->dev,
+ "Port already has a native VLAN: %d\n",
+ ocelot_port->vid);
+ return -EBUSY;
+ }
+ ocelot_port->vid = vid;
+ }
+
+ ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(vid),
REW_PORT_VLAN_CFG_PORT_VID_M,
- REW_PORT_VLAN_CFG, port->chip_port);
+ REW_PORT_VLAN_CFG, port);
+
+ return 0;
}
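
The helper above encodes a simple policy: a port may install a native VLAN only if it currently has none, and clearing back to vid 0 always succeeds; replacing one native VLAN with another is refused. The same rule in isolation (illustrative names, not the driver's):

#include <stdio.h>
#include <errno.h>

/* Returns 0 on success, -EBUSY if a different native VLAN is set.
 * vid == 0 always succeeds and clears the native VLAN.
 */
static int set_native_vlan(unsigned short *cur, unsigned short vid)
{
	if (*cur != vid) {
		if (*cur && vid)
			return -EBUSY;	/* already has a native VLAN */
		*cur = vid;
	}
	return 0;
}

int main(void)
{
	unsigned short vid = 0;

	printf("%d\n", set_native_vlan(&vid, 100));	/* 0 */
	printf("%d\n", set_native_vlan(&vid, 200));	/* -16 (EBUSY) */
	printf("%d\n", set_native_vlan(&vid, 0));	/* 0, cleared */
	return 0;
}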
-static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
- bool untagged)
+/* Default vlan to classify for untagged frames (may be zero) */
+static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, u16 pvid)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- int ret;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
- /* Add the port MAC address to with the right VLAN information */
- ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
- ENTRYTYPE_LOCKED);
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_VLAN_CFG_VLAN_VID(pvid),
+ ANA_PORT_VLAN_CFG_VLAN_VID_M,
+ ANA_PORT_VLAN_CFG, port);
+
+ ocelot_port->pvid = pvid;
+}
+
+int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ bool untagged)
+{
+ int ret;
/* Make the port a member of the VLAN */
- ocelot->vlan_mask[vid] |= BIT(port->chip_port);
+ ocelot->vlan_mask[vid] |= BIT(port);
ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
if (ret)
return ret;
/* Default ingress vlan classification */
if (pvid)
- port->pvid = vid;
+ ocelot_port_set_pvid(ocelot, port, vid);
 /* Untagged egress vlan classification */
- if (untagged && port->vid != vid) {
- if (port->vid) {
- dev_err(ocelot->dev,
- "Port already has a native VLAN: %d\n",
- port->vid);
- return -EBUSY;
- }
- port->vid = vid;
+ if (untagged) {
+ ret = ocelot_port_set_native_vlan(ocelot, port, vid);
+ if (ret)
+ return ret;
}
- ocelot_vlan_port_apply(ocelot, port);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_vlan_add);
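
ocelot_vlan_add/del maintain per-VID port membership as a plain bitmask and then push it to the hardware VLAN table. The core bookkeeping reduces to (standalone sketch, hypothetical names):

#include <stdint.h>
#include <stdio.h>

#define VLAN_CNT 4096
#define BIT(x) (1u << (x))

static uint32_t vlan_mask[VLAN_CNT];	/* one port bitmask per VID */

static void vlan_member_add(int port, uint16_t vid)
{
	vlan_mask[vid] |= BIT(port);	/* make the port a member */
}

static void vlan_member_del(int port, uint16_t vid)
{
	vlan_mask[vid] &= ~BIT(port);	/* remove membership */
}

int main(void)
{
	vlan_member_add(2, 100);
	vlan_member_add(3, 100);
	vlan_member_del(2, 100);
	printf("vid 100 mask: 0x%x\n", vlan_mask[100]);	/* 0x8 */
	return 0;
}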
+
+static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
+ bool untagged)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+ int ret;
+
+ ret = ocelot_vlan_add(ocelot, port, vid, pvid, untagged);
+ if (ret)
+ return ret;
+
+	/* Add the port MAC address with the right VLAN information */
+ ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
+ ENTRYTYPE_LOCKED);
+
+ return 0;
+}
+
+int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int ret;
+
+ /* Stop the port from being a member of the vlan */
+ ocelot->vlan_mask[vid] &= ~BIT(port);
+ ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ if (ret)
+ return ret;
+
+ /* Ingress */
+ if (ocelot_port->pvid == vid)
+ ocelot_port_set_pvid(ocelot, port, 0);
+
+ /* Egress */
+ if (ocelot_port->vid == vid)
+ ocelot_port_set_native_vlan(ocelot, port, 0);
return 0;
}
+EXPORT_SYMBOL(ocelot_vlan_del);
static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
int ret;
/* 8021q removes VID 0 on module unload for all interfaces
@@ -289,24 +350,12 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
if (vid == 0)
return 0;
- /* Del the port MAC address to with the right VLAN information */
- ocelot_mact_forget(ocelot, dev->dev_addr, vid);
-
- /* Stop the port from being a member of the vlan */
- ocelot->vlan_mask[vid] &= ~BIT(port->chip_port);
- ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ ret = ocelot_vlan_del(ocelot, port, vid);
if (ret)
return ret;
- /* Ingress */
- if (port->pvid == vid)
- port->pvid = 0;
-
- /* Egress */
- if (port->vid == vid)
- port->vid = 0;
-
- ocelot_vlan_port_apply(ocelot, port);
+	/* Del the port MAC address with the right VLAN information */
+ ocelot_mact_forget(ocelot, dev->dev_addr, vid);
return 0;
}
@@ -333,16 +382,11 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0);
ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]);
- /* Configure the CPU port to be VLAN aware */
- ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
- ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
- ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
- ANA_PORT_VLAN_CFG, ocelot->num_phys_ports);
-
/* Set vlan ingress filter mask to all ports but the CPU port by
* default.
*/
- ocelot_write(ocelot, GENMASK(9, 0), ANA_VLANMASK);
+ ocelot_write(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
+ ANA_VLANMASK);
for (port = 0; port < ocelot->num_phys_ports; port++) {
ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port);
@@ -362,14 +406,13 @@ static u16 ocelot_wm_enc(u16 value)
return value;
}
-static void ocelot_port_adjust_link(struct net_device *dev)
+void ocelot_adjust_link(struct ocelot *ocelot, int port,
+ struct phy_device *phydev)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- u8 p = port->chip_port;
- int speed, atop_wm, mode = 0;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int speed, mode = 0;
- switch (dev->phydev->speed) {
+ switch (phydev->speed) {
case SPEED_10:
speed = OCELOT_SPEED_10;
break;
@@ -385,87 +428,41 @@ static void ocelot_port_adjust_link(struct net_device *dev)
mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
break;
default:
- netdev_err(dev, "Unsupported PHY speed: %d\n",
- dev->phydev->speed);
+ dev_err(ocelot->dev, "Unsupported PHY speed on port %d: %d\n",
+ port, phydev->speed);
return;
}
- phy_print_status(dev->phydev);
+ phy_print_status(phydev);
- if (!dev->phydev->link)
+ if (!phydev->link)
return;
/* Only full duplex supported for now */
- ocelot_port_writel(port, DEV_MAC_MODE_CFG_FDX_ENA |
+ ocelot_port_writel(ocelot_port, DEV_MAC_MODE_CFG_FDX_ENA |
mode, DEV_MAC_MODE_CFG);
- /* Set MAC IFG Gaps
- * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
- * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
- */
- ocelot_port_writel(port, DEV_MAC_IFG_CFG_TX_IFG(5), DEV_MAC_IFG_CFG);
-
- /* Load seed (0) and set MAC HDX late collision */
- ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
- DEV_MAC_HDX_CFG_SEED_LOAD,
- DEV_MAC_HDX_CFG);
- mdelay(1);
- ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
- DEV_MAC_HDX_CFG);
-
- /* Disable HDX fast control */
- ocelot_port_writel(port, DEV_PORT_MISC_HDX_FAST_DIS, DEV_PORT_MISC);
-
- /* SGMII only for now */
- ocelot_port_writel(port, PCS1G_MODE_CFG_SGMII_MODE_ENA, PCS1G_MODE_CFG);
- ocelot_port_writel(port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
-
- /* Enable PCS */
- ocelot_port_writel(port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
-
- /* No aneg on SGMII */
- ocelot_port_writel(port, 0, PCS1G_ANEG_CFG);
-
- /* No loopback */
- ocelot_port_writel(port, 0, PCS1G_LB_CFG);
-
- /* Set Max Length and maximum tags allowed */
- ocelot_port_writel(port, VLAN_ETH_FRAME_LEN, DEV_MAC_MAXLEN_CFG);
- ocelot_port_writel(port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
- DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
- DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
- DEV_MAC_TAGS_CFG);
+ if (ocelot->ops->pcs_init)
+ ocelot->ops->pcs_init(ocelot, port);
/* Enable MAC module */
- ocelot_port_writel(port, DEV_MAC_ENA_CFG_RX_ENA |
+ ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
/* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of
* reset */
- ocelot_port_writel(port, DEV_CLOCK_CFG_LINK_SPEED(speed),
+ ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed),
DEV_CLOCK_CFG);
- /* Set SMAC of Pause frame (00:00:00:00:00:00) */
- ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
- ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_LOW_CFG);
-
/* No PFC */
ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed),
- ANA_PFC_PFC_CFG, p);
-
- /* Set Pause WM hysteresis
- * 152 = 6 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ
- * 101 = 4 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ
- */
- ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA |
- SYS_PAUSE_CFG_PAUSE_STOP(101) |
- SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, p);
+ ANA_PFC_PFC_CFG, port);
/* Core: Enable port for frame transfer */
ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
QSYS_SWITCH_PORT_MODE_PORT_ENA,
- QSYS_SWITCH_PORT_MODE, p);
+ QSYS_SWITCH_PORT_MODE, port);
/* Flow control */
ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
@@ -473,64 +470,88 @@ static void ocelot_port_adjust_link(struct net_device *dev)
SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
SYS_MAC_FC_CFG_FC_LINK_SPEED(speed),
- SYS_MAC_FC_CFG, p);
- ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, p);
-
- /* Tail dropping watermark */
- atop_wm = (ocelot->shared_queue_sz - 9 * VLAN_ETH_FRAME_LEN) / OCELOT_BUFFER_CELL_SZ;
- ocelot_write_rix(ocelot, ocelot_wm_enc(9 * VLAN_ETH_FRAME_LEN),
- SYS_ATOP, p);
- ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
+ SYS_MAC_FC_CFG, port);
+ ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
}
+EXPORT_SYMBOL(ocelot_adjust_link);
-static int ocelot_port_open(struct net_device *dev)
+static void ocelot_port_adjust_link(struct net_device *dev)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- int err;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ ocelot_adjust_link(ocelot, port, dev->phydev);
+}
+void ocelot_port_enable(struct ocelot *ocelot, int port,
+ struct phy_device *phy)
+{
/* Enable receiving frames on the port, and activate auto-learning of
* MAC addresses.
*/
ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
ANA_PORT_PORT_CFG_RECV_ENA |
- ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port),
- ANA_PORT_PORT_CFG, port->chip_port);
+ ANA_PORT_PORT_CFG_PORTID_VAL(port),
+ ANA_PORT_PORT_CFG, port);
+}
+EXPORT_SYMBOL(ocelot_port_enable);
- if (port->serdes) {
- err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET,
- port->phy_mode);
+static int ocelot_port_open(struct net_device *dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+ int err;
+
+ if (priv->serdes) {
+ err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET,
+ priv->phy_mode);
if (err) {
netdev_err(dev, "Could not set mode of SerDes\n");
return err;
}
}
- err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link,
- port->phy_mode);
+ err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link,
+ priv->phy_mode);
if (err) {
netdev_err(dev, "Could not attach to PHY\n");
return err;
}
- dev->phydev = port->phy;
+ dev->phydev = priv->phy;
+
+ phy_attached_info(priv->phy);
+ phy_start(priv->phy);
+
+ ocelot_port_enable(ocelot, port, priv->phy);
- phy_attached_info(port->phy);
- phy_start(port->phy);
return 0;
}
+void ocelot_port_disable(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
+ ocelot_rmw_rix(ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, port);
+}
+EXPORT_SYMBOL(ocelot_port_disable);
+
static int ocelot_port_stop(struct net_device *dev)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
- phy_disconnect(port->phy);
+ phy_disconnect(priv->phy);
dev->phydev = NULL;
- ocelot_port_writel(port, 0, DEV_MAC_ENA_CFG);
- ocelot_rmw_rix(port->ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA,
- QSYS_SWITCH_PORT_MODE, port->chip_port);
+ ocelot_port_disable(ocelot, port);
+
return 0;
}
@@ -556,13 +577,15 @@ static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct ocelot_port_private *priv = netdev_priv(dev);
struct skb_shared_info *shinfo = skb_shinfo(skb);
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- u32 val, ifh[IFH_LEN];
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ u32 val, ifh[OCELOT_TAG_LEN / 4];
struct frame_info info = {};
u8 grp = 0; /* Send everything on CPU group 0 */
unsigned int i, count, last;
+ int port = priv->chip_port;
val = ocelot_read(ocelot, QS_INJ_STATUS);
if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) ||
@@ -572,20 +595,20 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
- info.port = BIT(port->chip_port);
+ info.port = BIT(port);
info.tag_type = IFH_TAG_TYPE_C;
info.vid = skb_vlan_tag_get(skb);
/* Check if timestamping is needed */
if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) {
- info.rew_op = port->ptp_cmd;
- if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
- info.rew_op |= (port->ts_id % 4) << 3;
+ info.rew_op = ocelot_port->ptp_cmd;
+ if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
+ info.rew_op |= (ocelot_port->ts_id % 4) << 3;
}
ocelot_gen_ifh(ifh, &info);
- for (i = 0; i < IFH_LEN; i++)
+ for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
QS_INJ_WR, grp);
@@ -615,7 +638,7 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
- port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
struct ocelot_skb *oskb =
kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC);
@@ -625,10 +648,10 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
oskb->skb = skb;
- oskb->id = port->ts_id % 4;
- port->ts_id++;
+ oskb->id = ocelot_port->ts_id % 4;
+ ocelot_port->ts_id++;
- list_add_tail(&oskb->head, &port->skbs);
+ list_add_tail(&oskb->head, &ocelot_port->skbs);
return NETDEV_TX_OK;
}
@@ -667,25 +690,29 @@ EXPORT_SYMBOL(ocelot_get_hwtimestamp);
static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
- return ocelot_mact_forget(port->ocelot, addr, port->pvid);
+ return ocelot_mact_forget(ocelot, addr, ocelot_port->pvid);
}
static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
- return ocelot_mact_learn(port->ocelot, PGID_CPU, addr, port->pvid,
+ return ocelot_mact_learn(ocelot, PGID_CPU, addr, ocelot_port->pvid,
ENTRYTYPE_LOCKED);
}
static void ocelot_set_rx_mode(struct net_device *dev)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- int i;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
u32 val;
+ int i;
/* This doesn't handle promiscuous mode because the bridge core is
* setting IFF_PROMISC on all slave interfaces and all frames would be
@@ -701,10 +728,11 @@ static void ocelot_set_rx_mode(struct net_device *dev)
static int ocelot_port_get_phys_port_name(struct net_device *dev,
char *buf, size_t len)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ int port = priv->chip_port;
int ret;
- ret = snprintf(buf, len, "p%d", port->chip_port);
+ ret = snprintf(buf, len, "p%d", port);
if (ret >= len)
return -EINVAL;
@@ -713,15 +741,16 @@ static int ocelot_port_get_phys_port_name(struct net_device *dev,
static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
const struct sockaddr *addr = p;
/* Learn the new net device MAC address in the mac table. */
- ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, port->pvid,
+ ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, ocelot_port->pvid,
ENTRYTYPE_LOCKED);
/* Then forget the previous one. */
- ocelot_mact_forget(ocelot, dev->dev_addr, port->pvid);
+ ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid);
ether_addr_copy(dev->dev_addr, addr->sa_data);
return 0;
@@ -730,11 +759,12 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
static void ocelot_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
/* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port->chip_port),
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port),
SYS_STAT_CFG);
/* Get Rx stats */
@@ -765,21 +795,18 @@ static void ocelot_get_stats64(struct net_device *dev,
stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
}
-static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr,
- u16 vid, u16 flags,
- struct netlink_ext_ack *extack)
+int ocelot_fdb_add(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid, bool vlan_aware)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
if (!vid) {
- if (!port->vlan_aware)
+ if (!vlan_aware)
/* If the bridge is not VLAN aware and no VID was
* provided, set it to pvid to ensure the MAC entry
* matches incoming untagged packets
*/
- vid = port->pvid;
+ vid = ocelot_port->pvid;
else
/* If the bridge is VLAN aware a VID must be provided as
* otherwise the learnt entry wouldn't match any frame.
@@ -787,19 +814,40 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return -EINVAL;
}
- return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
- ENTRYTYPE_LOCKED);
+ return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED);
}
+EXPORT_SYMBOL(ocelot_fdb_add);
-static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid)
+static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, priv->vlan_aware);
+}
+int ocelot_fdb_del(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid)
+{
return ocelot_mact_forget(ocelot, addr, vid);
}
+EXPORT_SYMBOL(ocelot_fdb_del);
+
+static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_fdb_del(ocelot, port, addr, vid);
+}
struct ocelot_dump_ctx {
struct net_device *dev;
@@ -808,9 +856,10 @@ struct ocelot_dump_ctx {
int idx;
};
-static int ocelot_fdb_do_dump(struct ocelot_mact_entry *entry,
- struct ocelot_dump_ctx *dump)
+static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
+ bool is_static, void *data)
{
+ struct ocelot_dump_ctx *dump = data;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
@@ -831,12 +880,12 @@ static int ocelot_fdb_do_dump(struct ocelot_mact_entry *entry,
ndm->ndm_flags = NTF_SELF;
ndm->ndm_type = 0;
ndm->ndm_ifindex = dump->dev->ifindex;
- ndm->ndm_state = NUD_REACHABLE;
+ ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
- if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac))
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
goto nla_put_failure;
- if (entry->vid && nla_put_u16(dump->skb, NDA_VLAN, entry->vid))
+ if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
goto nla_put_failure;
nlmsg_end(dump->skb, nlh);
@@ -850,12 +899,11 @@ nla_put_failure:
return -EMSGSIZE;
}
-static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col,
- struct ocelot_mact_entry *entry)
+static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
+ struct ocelot_mact_entry *entry)
{
- struct ocelot *ocelot = port->ocelot;
- char mac[ETH_ALEN];
u32 val, dst, macl, mach;
+ char mac[ETH_ALEN];
/* Set row and column to read from */
ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row);
@@ -878,7 +926,7 @@ static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col,
* do not report it.
*/
dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3;
- if (dst != port->chip_port)
+ if (dst != port)
return -EINVAL;
/* Get the entry's MAC address and VLAN id */
@@ -898,43 +946,61 @@ static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col,
return 0;
}
-static int ocelot_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct net_device *dev,
- struct net_device *filter_dev, int *idx)
+int ocelot_fdb_dump(struct ocelot *ocelot, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
{
- struct ocelot_port *port = netdev_priv(dev);
- int i, j, ret = 0;
- struct ocelot_dump_ctx dump = {
- .dev = dev,
- .skb = skb,
- .cb = cb,
- .idx = *idx,
- };
-
- struct ocelot_mact_entry entry;
+ int i, j;
/* Loop through all the mac tables entries. There are 1024 rows of 4
* entries.
*/
for (i = 0; i < 1024; i++) {
for (j = 0; j < 4; j++) {
- ret = ocelot_mact_read(port, i, j, &entry);
+ struct ocelot_mact_entry entry;
+ bool is_static;
+ int ret;
+
+ ret = ocelot_mact_read(ocelot, port, i, j, &entry);
/* If the entry is invalid (wrong port, invalid...),
* skip it.
*/
if (ret == -EINVAL)
continue;
else if (ret)
- goto end;
+ return ret;
- ret = ocelot_fdb_do_dump(&entry, &dump);
+ is_static = (entry.type == ENTRYTYPE_LOCKED);
+
+ ret = cb(entry.mac, entry.vid, is_static, data);
if (ret)
- goto end;
+ return ret;
}
}
-end:
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_fdb_dump);
+
+static int ocelot_port_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev, int *idx)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_dump_ctx dump = {
+ .dev = dev,
+ .skb = skb,
+ .cb = cb,
+ .idx = *idx,
+ };
+ int port = priv->chip_port;
+ int ret;
+
+ ret = ocelot_fdb_dump(ocelot, port, ocelot_port_fdb_do_dump, &dump);
+
*idx = dump.idx;
+
return ret;
}
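
The dump path is now split into a hardware walker (ocelot_fdb_dump) and a formatter passed in as a dsa_fdb_dump_cb_t, so the same walker can serve both the netlink path and DSA. The control flow, reduced to its shape with illustrative types and a fake two-entry table:

#include <stdbool.h>
#include <stdio.h>

typedef int (*fdb_dump_cb_t)(const unsigned char *addr, unsigned short vid,
			     bool is_static, void *data);

struct entry {
	unsigned char mac[6];
	unsigned short vid;
	bool is_static;
};

/* Walk a table and hand each entry to the caller's callback;
 * stop early if the callback reports an error.
 */
static int fdb_dump(const struct entry *tbl, int n,
		    fdb_dump_cb_t cb, void *data)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = cb(tbl[i].mac, tbl[i].vid, tbl[i].is_static, data);
		if (ret)
			return ret;
	}
	return 0;
}

static int print_cb(const unsigned char *a, unsigned short vid,
		    bool is_static, void *data)
{
	(void)data;
	printf("%02x:..:%02x vid %u %s\n", a[0], a[5], vid,
	       is_static ? "static" : "reachable");
	return 0;
}

int main(void)
{
	struct entry tbl[] = {
		{ { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 }, 1, true },
		{ { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 }, 100, false },
	};

	return fdb_dump(tbl, 2, print_cb, NULL);
}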
@@ -953,18 +1019,20 @@ static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
static int ocelot_set_features(struct net_device *dev,
netdev_features_t features)
{
- struct ocelot_port *port = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
- port->tc.offload_cnt) {
+ priv->tc.offload_cnt) {
netdev_err(dev,
"Cannot disable HW TC offload while offloads active\n");
return -EBUSY;
}
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
- ocelot_vlan_mode(port, features);
+ ocelot_vlan_mode(ocelot, port, features);
return 0;
}
@@ -972,8 +1040,8 @@ static int ocelot_set_features(struct net_device *dev,
static int ocelot_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
- struct ocelot_port *ocelot_port = netdev_priv(dev);
- struct ocelot *ocelot = ocelot_port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
ppid->id_len = sizeof(ocelot->base_mac);
memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len);
@@ -981,17 +1049,17 @@ static int ocelot_get_port_parent_id(struct net_device *dev,
return 0;
}
-static int ocelot_hwstamp_get(struct ocelot_port *port, struct ifreq *ifr)
+static int ocelot_hwstamp_get(struct ocelot *ocelot, int port,
+ struct ifreq *ifr)
{
- struct ocelot *ocelot = port->ocelot;
-
return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
}
-static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
+static int ocelot_hwstamp_set(struct ocelot *ocelot, int port,
+ struct ifreq *ifr)
{
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
struct hwtstamp_config cfg;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -1004,16 +1072,16 @@ static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
/* Tx type sanity check */
switch (cfg.tx_type) {
case HWTSTAMP_TX_ON:
- port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
/* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we
* need to update the origin time.
*/
- port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+ ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
break;
case HWTSTAMP_TX_OFF:
- port->ptp_cmd = 0;
+ ocelot_port->ptp_cmd = 0;
break;
default:
return -ERANGE;
@@ -1055,8 +1123,9 @@ static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
/* The function is only used for PTP operations for now */
if (!ocelot->ptp)
@@ -1064,9 +1133,9 @@ static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
- return ocelot_hwstamp_set(port, ifr);
+ return ocelot_hwstamp_set(ocelot, port, ifr);
case SIOCGHWTSTAMP:
- return ocelot_hwstamp_get(port, ifr);
+ return ocelot_hwstamp_get(ocelot, port, ifr);
default:
return -EOPNOTSUPP;
}
@@ -1080,9 +1149,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_get_phys_port_name = ocelot_port_get_phys_port_name,
.ndo_set_mac_address = ocelot_port_set_mac_address,
.ndo_get_stats64 = ocelot_get_stats64,
- .ndo_fdb_add = ocelot_fdb_add,
- .ndo_fdb_del = ocelot_fdb_del,
- .ndo_fdb_dump = ocelot_fdb_dump,
+ .ndo_fdb_add = ocelot_port_fdb_add,
+ .ndo_fdb_del = ocelot_port_fdb_del,
+ .ndo_fdb_dump = ocelot_port_fdb_dump,
.ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
@@ -1091,10 +1160,8 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_do_ioctl = ocelot_ioctl,
};
-static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
{
- struct ocelot_port *port = netdev_priv(netdev);
- struct ocelot *ocelot = port->ocelot;
int i;
if (sset != ETH_SS_STATS)
@@ -1104,6 +1171,17 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
ETH_GSTRING_LEN);
}
+EXPORT_SYMBOL(ocelot_get_strings);
+
+static void ocelot_port_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ struct ocelot_port_private *priv = netdev_priv(netdev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ ocelot_get_strings(ocelot, port, sset, data);
+}
static void ocelot_update_stats(struct ocelot *ocelot)
{
@@ -1145,11 +1223,8 @@ static void ocelot_check_stats_work(struct work_struct *work)
OCELOT_STATS_CHECK_DELAY);
}
-static void ocelot_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
+void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
int i;
/* check and update now */
@@ -1157,28 +1232,42 @@ static void ocelot_get_ethtool_stats(struct net_device *dev,
/* Copy all counters */
for (i = 0; i < ocelot->num_stats; i++)
- *data++ = ocelot->stats[port->chip_port * ocelot->num_stats + i];
+ *data++ = ocelot->stats[port * ocelot->num_stats + i];
}
+EXPORT_SYMBOL(ocelot_get_ethtool_stats);
-static int ocelot_get_sset_count(struct net_device *dev, int sset)
+static void ocelot_port_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+ ocelot_get_ethtool_stats(ocelot, port, data);
+}
+
+int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
+{
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
+
return ocelot->num_stats;
}
+EXPORT_SYMBOL(ocelot_get_sset_count);
-static int ocelot_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+static int ocelot_port_get_sset_count(struct net_device *dev, int sset)
{
- struct ocelot_port *ocelot_port = netdev_priv(dev);
- struct ocelot *ocelot = ocelot_port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
- if (!ocelot->ptp)
- return ethtool_op_get_ts_info(dev, info);
+ return ocelot_get_sset_count(ocelot, port, sset);
+}
+int ocelot_get_ts_info(struct ocelot *ocelot, int port,
+ struct ethtool_ts_info *info)
+{
info->phc_index = ocelot->ptp_clock ?
ptp_clock_index(ocelot->ptp_clock) : -1;
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
@@ -1193,36 +1282,43 @@ static int ocelot_get_ts_info(struct net_device *dev,
return 0;
}
+EXPORT_SYMBOL(ocelot_get_ts_info);
+
+static int ocelot_port_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ if (!ocelot->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ return ocelot_get_ts_info(ocelot, port, info);
+}
static const struct ethtool_ops ocelot_ethtool_ops = {
- .get_strings = ocelot_get_strings,
- .get_ethtool_stats = ocelot_get_ethtool_stats,
- .get_sset_count = ocelot_get_sset_count,
+ .get_strings = ocelot_port_get_strings,
+ .get_ethtool_stats = ocelot_port_get_ethtool_stats,
+ .get_sset_count = ocelot_port_get_sset_count,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
- .get_ts_info = ocelot_get_ts_info,
+ .get_ts_info = ocelot_port_get_ts_info,
};
-static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
- struct switchdev_trans *trans,
- u8 state)
+void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
u32 port_cfg;
- int port, i;
+ int p, i;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- if (!(BIT(ocelot_port->chip_port) & ocelot->bridge_mask))
- return 0;
+ if (!(BIT(port) & ocelot->bridge_mask))
+ return;
- port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG,
- ocelot_port->chip_port);
+ port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port);
switch (state) {
case BR_STATE_FORWARDING:
- ocelot->bridge_fwd_mask |= BIT(ocelot_port->chip_port);
+ ocelot->bridge_fwd_mask |= BIT(port);
/* Fallthrough */
case BR_STATE_LEARNING:
port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
@@ -1230,19 +1326,18 @@ static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
default:
port_cfg &= ~ANA_PORT_PORT_CFG_LEARN_ENA;
- ocelot->bridge_fwd_mask &= ~BIT(ocelot_port->chip_port);
+ ocelot->bridge_fwd_mask &= ~BIT(port);
break;
}
- ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG,
- ocelot_port->chip_port);
+ ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG, port);
/* Apply FWD mask. The loop is needed to add/remove the current port as
* a source for the other ports.
*/
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (ocelot->bridge_fwd_mask & BIT(port)) {
- unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(port);
+ for (p = 0; p < ocelot->num_phys_ports; p++) {
+ if (p == ocelot->cpu || (ocelot->bridge_fwd_mask & BIT(p))) {
+ unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(p);
for (i = 0; i < ocelot->num_phys_ports; i++) {
unsigned long bond_mask = ocelot->lags[i];
@@ -1250,78 +1345,93 @@ static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
if (!bond_mask)
continue;
- if (bond_mask & BIT(port)) {
+ if (bond_mask & BIT(p)) {
mask &= ~bond_mask;
break;
}
}
- ocelot_write_rix(ocelot,
- BIT(ocelot->num_phys_ports) | mask,
- ANA_PGID_PGID, PGID_SRC + port);
+			/* Prevent the NPI port from looping back to itself */
+ if (p != ocelot->cpu)
+ mask |= BIT(ocelot->cpu);
+
+ ocelot_write_rix(ocelot, mask,
+ ANA_PGID_PGID, PGID_SRC + p);
} else {
/* Only the CPU port, this is compatible with link
* aggregation.
*/
ocelot_write_rix(ocelot,
- BIT(ocelot->num_phys_ports),
- ANA_PGID_PGID, PGID_SRC + port);
+ BIT(ocelot->cpu),
+ ANA_PGID_PGID, PGID_SRC + p);
}
}
+}
+EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
- return 0;
+static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
+ struct switchdev_trans *trans,
+ u8 state)
+{
+ if (switchdev_trans_ph_prepare(trans))
+ return;
+
+ ocelot_bridge_stp_state_set(ocelot, port, state);
}
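
The loop above computes, for every port, the set of sources allowed to forward to it: the bridged ports minus itself, plus the CPU/NPI port. A compact model of that mask computation (sketch with hypothetical constants; the LAG-partner subtraction is omitted for brevity):

#include <stdint.h>
#include <stdio.h>

#define BIT(x) (1u << (x))

/* Source mask for port p: bridged peers minus itself, plus the CPU
 * port (unless p is the CPU port, to avoid a loop back to itself).
 */
static uint32_t src_mask(uint32_t bridge_fwd_mask, int cpu, int p)
{
	uint32_t mask = bridge_fwd_mask & ~BIT(p);

	if (p != cpu)
		mask |= BIT(cpu);
	return mask;
}

int main(void)
{
	uint32_t fwd = BIT(0) | BIT(1) | BIT(2);	/* ports 0-2 bridged */
	int cpu = 5, p;

	for (p = 0; p < 3; p++)
		printf("PGID_SRC + %d = 0x%x\n", p, src_mask(fwd, cpu, p));
	return 0;
}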
-static void ocelot_port_attr_ageing_set(struct ocelot_port *ocelot_port,
+void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
+{
+ ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(msecs / 2),
+ ANA_AUTOAGE);
+}
+EXPORT_SYMBOL(ocelot_set_ageing_time);
+
+static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port,
unsigned long ageing_clock_t)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
- ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(ageing_time / 2),
- ANA_AUTOAGE);
+ ocelot_set_ageing_time(ocelot, ageing_time);
}
-static void ocelot_port_attr_mc_set(struct ocelot_port *port, bool mc)
+static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
{
- struct ocelot *ocelot = port->ocelot;
- u32 val = ocelot_read_gix(ocelot, ANA_PORT_CPU_FWD_CFG,
- port->chip_port);
+ u32 cpu_fwd_mcast = ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA;
+ u32 val = 0;
if (mc)
- val |= ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
- ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
- ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA;
- else
- val &= ~(ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
- ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
- ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA);
+ val = cpu_fwd_mcast;
- ocelot_write_gix(ocelot, val, ANA_PORT_CPU_FWD_CFG, port->chip_port);
+ ocelot_rmw_gix(ocelot, val, cpu_fwd_mcast,
+ ANA_PORT_CPU_FWD_CFG, port);
}
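
Switching from ocelot_write_gix() to ocelot_rmw_gix() here means only the multicast-redirect bits are touched and the rest of ANA_PORT_CPU_FWD_CFG keeps its value, without the explicit read/or/and-not dance the old code did. The read-modify-write idiom on its own (plain C over a fake register):

#include <stdint.h>
#include <stdio.h>

/* Update only the bits selected by mask; everything else in the
 * register keeps its current value.
 */
static uint32_t rmw(uint32_t reg, uint32_t val, uint32_t mask)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t reg = 0xF0F0;
	uint32_t mcast_bits = 0x000E;	/* hypothetical redirect bits */

	reg = rmw(reg, mcast_bits, mcast_bits);	/* enable: val == mask */
	printf("0x%04x\n", reg);		/* 0xF0FE */
	reg = rmw(reg, 0, mcast_bits);		/* disable: val == 0 */
	printf("0x%04x\n", reg);		/* 0xF0F0 */
	return 0;
}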
static int ocelot_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
{
- struct ocelot_port *ocelot_port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
int err = 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- ocelot_port_attr_stp_state_set(ocelot_port, trans,
+ ocelot_port_attr_stp_state_set(ocelot, port, trans,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time);
+ ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- ocelot_port->vlan_aware = attr->u.vlan_filtering;
- ocelot_vlan_port_apply(ocelot_port->ocelot, ocelot_port);
+ priv->vlan_aware = attr->u.vlan_filtering;
+ ocelot_port_vlan_filtering(ocelot, port, priv->vlan_aware);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
- ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled);
+ ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
break;
default:
err = -EOPNOTSUPP;
@@ -1383,15 +1493,17 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
const struct switchdev_obj_port_mdb *mdb,
struct switchdev_trans *trans)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- struct ocelot_multicast *mc;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
unsigned char addr[ETH_ALEN];
+ struct ocelot_multicast *mc;
+ int port = priv->chip_port;
u16 vid = mdb->vid;
bool new = false;
if (!vid)
- vid = port->pvid;
+ vid = ocelot_port->pvid;
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc) {
@@ -1415,7 +1527,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
ocelot_mact_forget(ocelot, addr, vid);
}
- mc->ports |= BIT(port->chip_port);
+ mc->ports |= BIT(port);
addr[2] = mc->ports << 0;
addr[1] = mc->ports << 8;
@@ -1425,14 +1537,16 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
static int ocelot_port_obj_del_mdb(struct net_device *dev,
const struct switchdev_obj_port_mdb *mdb)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- struct ocelot_multicast *mc;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
unsigned char addr[ETH_ALEN];
+ struct ocelot_multicast *mc;
+ int port = priv->chip_port;
u16 vid = mdb->vid;
if (!vid)
- vid = port->pvid;
+ vid = ocelot_port->pvid;
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc)
@@ -1444,7 +1558,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
addr[0] = 0;
ocelot_mact_forget(ocelot, addr, vid);
- mc->ports &= ~BIT(port->chip_port);
+ mc->ports &= ~BIT(port);
if (!mc->ports) {
list_del(&mc->list);
devm_kfree(ocelot->dev, mc);
@@ -1501,11 +1615,9 @@ static int ocelot_port_obj_del(struct net_device *dev,
return ret;
}
-static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port,
- struct net_device *bridge)
+int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
-
if (!ocelot->bridge_mask) {
ocelot->hw_bridge_dev = bridge;
} else {
@@ -1515,26 +1627,25 @@ static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port,
return -ENODEV;
}
- ocelot->bridge_mask |= BIT(ocelot_port->chip_port);
+ ocelot->bridge_mask |= BIT(port);
return 0;
}
+EXPORT_SYMBOL(ocelot_port_bridge_join);
-static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port,
- struct net_device *bridge)
+int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
-
- ocelot->bridge_mask &= ~BIT(ocelot_port->chip_port);
+ ocelot->bridge_mask &= ~BIT(port);
if (!ocelot->bridge_mask)
ocelot->hw_bridge_dev = NULL;
- /* Clear bridge vlan settings before calling ocelot_vlan_port_apply */
- ocelot_port->vlan_aware = 0;
- ocelot_port->pvid = 0;
- ocelot_port->vid = 0;
+ ocelot_port_vlan_filtering(ocelot, port, 0);
+ ocelot_port_set_pvid(ocelot, port, 0);
+ return ocelot_port_set_native_vlan(ocelot, port, 0);
}
+EXPORT_SYMBOL(ocelot_port_bridge_leave);
static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
{
@@ -1594,20 +1705,18 @@ static void ocelot_setup_lag(struct ocelot *ocelot, int lag)
}
}
-static int ocelot_port_lag_join(struct ocelot_port *ocelot_port,
+static int ocelot_port_lag_join(struct ocelot *ocelot, int port,
struct net_device *bond)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
- int p = ocelot_port->chip_port;
- int lag, lp;
struct net_device *ndev;
u32 bond_mask = 0;
+ int lag, lp;
rcu_read_lock();
for_each_netdev_in_bond_rcu(bond, ndev) {
- struct ocelot_port *port = netdev_priv(ndev);
+ struct ocelot_port_private *priv = netdev_priv(ndev);
- bond_mask |= BIT(port->chip_port);
+ bond_mask |= BIT(priv->chip_port);
}
rcu_read_unlock();
@@ -1616,17 +1725,17 @@ static int ocelot_port_lag_join(struct ocelot_port *ocelot_port,
/* If the new port is the lowest one, use it as the logical port from
* now on
*/
- if (p == lp) {
- lag = p;
- ocelot->lags[p] = bond_mask;
- bond_mask &= ~BIT(p);
+ if (port == lp) {
+ lag = port;
+ ocelot->lags[port] = bond_mask;
+ bond_mask &= ~BIT(port);
if (bond_mask) {
lp = __ffs(bond_mask);
ocelot->lags[lp] = 0;
}
} else {
lag = lp;
- ocelot->lags[lp] |= BIT(p);
+ ocelot->lags[lp] |= BIT(port);
}
ocelot_setup_lag(ocelot, lag);
@@ -1635,34 +1744,32 @@ static int ocelot_port_lag_join(struct ocelot_port *ocelot_port,
return 0;
}
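
Bond members elect the lowest-numbered port as the "logical" port that owns the LAG configuration; __ffs() picks it out of the membership bitmask. A self-contained model of that election (hypothetical names; userspace ffs() stands in for the kernel's __ffs()):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define BIT(x) (1u << (x))

/* The LAG's logical port is the lowest-numbered member; ffs()
 * returns a 1-based bit position, hence the -1.
 */
static int logical_port(uint32_t bond_mask)
{
	return ffs(bond_mask) - 1;
}

int main(void)
{
	uint32_t bond = BIT(3) | BIT(5) | BIT(6);

	printf("logical port: %d\n", logical_port(bond));	/* 3 */
	return 0;
}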
-static void ocelot_port_lag_leave(struct ocelot_port *ocelot_port,
+static void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
- int p = ocelot_port->chip_port;
u32 port_cfg;
int i;
/* Remove port from any lag */
for (i = 0; i < ocelot->num_phys_ports; i++)
- ocelot->lags[i] &= ~BIT(ocelot_port->chip_port);
+ ocelot->lags[i] &= ~BIT(port);
/* if it was the logical port of the lag, move the lag config to the
* next port
*/
- if (ocelot->lags[p]) {
- int n = __ffs(ocelot->lags[p]);
+ if (ocelot->lags[port]) {
+ int n = __ffs(ocelot->lags[port]);
- ocelot->lags[n] = ocelot->lags[p];
- ocelot->lags[p] = 0;
+ ocelot->lags[n] = ocelot->lags[port];
+ ocelot->lags[port] = 0;
ocelot_setup_lag(ocelot, n);
}
- port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+ port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port);
port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
- ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(p),
- ANA_PORT_PORT_CFG, p);
+ ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(port),
+ ANA_PORT_PORT_CFG, port);
ocelot_set_aggr_pgids(ocelot);
}
@@ -1677,31 +1784,30 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
unsigned long event,
struct netdev_notifier_changeupper_info *info)
{
- struct ocelot_port *ocelot_port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
int err = 0;
- if (!ocelot_netdevice_dev_check(dev))
- return 0;
-
switch (event) {
case NETDEV_CHANGEUPPER:
if (netif_is_bridge_master(info->upper_dev)) {
- if (info->linking)
- err = ocelot_port_bridge_join(ocelot_port,
+ if (info->linking) {
+ err = ocelot_port_bridge_join(ocelot, port,
info->upper_dev);
- else
- ocelot_port_bridge_leave(ocelot_port,
- info->upper_dev);
-
- ocelot_vlan_port_apply(ocelot_port->ocelot,
- ocelot_port);
+ } else {
+ err = ocelot_port_bridge_leave(ocelot, port,
+ info->upper_dev);
+ priv->vlan_aware = false;
+ }
}
if (netif_is_lag_master(info->upper_dev)) {
if (info->linking)
- err = ocelot_port_lag_join(ocelot_port,
+ err = ocelot_port_lag_join(ocelot, port,
info->upper_dev);
else
- ocelot_port_lag_leave(ocelot_port,
+ ocelot_port_lag_leave(ocelot, port,
info->upper_dev);
}
break;
@@ -1719,12 +1825,16 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
int ret = 0;
+ if (!ocelot_netdevice_dev_check(dev))
+ return 0;
+
if (event == NETDEV_PRECHANGEUPPER &&
netif_is_lag_master(info->upper_dev)) {
struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
struct netlink_ext_ack *extack;
- if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ if (lag_upper_info &&
+ lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
extack = netdev_notifier_info_to_extack(&info->info);
NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
@@ -2000,24 +2110,97 @@ static int ocelot_init_timestamp(struct ocelot *ocelot)
return 0;
}
+static void ocelot_port_set_mtu(struct ocelot *ocelot, int port, size_t mtu)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int atop_wm;
+
+ ocelot_port_writel(ocelot_port, mtu, DEV_MAC_MAXLEN_CFG);
+
+ /* Set Pause WM hysteresis
+ * 152 = 6 * mtu / OCELOT_BUFFER_CELL_SZ
+ * 101 = 4 * mtu / OCELOT_BUFFER_CELL_SZ
+ */
+ ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA |
+ SYS_PAUSE_CFG_PAUSE_STOP(101) |
+ SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, port);
+
+ /* Tail dropping watermark */
+ atop_wm = (ocelot->shared_queue_sz - 9 * mtu) / OCELOT_BUFFER_CELL_SZ;
+ ocelot_write_rix(ocelot, ocelot_wm_enc(9 * mtu),
+ SYS_ATOP, port);
+ ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
+}
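/* Editor's sketch (not part of the patch): checking the watermark
 * arithmetic documented in the comment above. Assumes
 * OCELOT_BUFFER_CELL_SZ == 60 and mtu == VLAN_ETH_FRAME_LEN == 1518;
 * shared_queue_sz is the 224 KiB set in ocelot_chip_init() later in
 * this diff. The register encoding done by ocelot_wm_enc() is not
 * modelled.
 */
#include <stdio.h>

int main(void)
{
	const int cell = 60, mtu = 1518, shared_queue_sz = 224 * 1024;

	/* rounded division, matching the hardcoded 152/101 above */
	printf("PAUSE_START = %d\n", (6 * mtu + cell / 2) / cell);	/* 152 */
	printf("PAUSE_STOP  = %d\n", (4 * mtu + cell / 2) / cell);	/* 101 */
	printf("atop_wm     = %d\n", (shared_queue_sz - 9 * mtu) / cell); /* 3595 */
	return 0;
}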
+
+void ocelot_init_port(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ INIT_LIST_HEAD(&ocelot_port->skbs);
+
+ /* Basic L2 initialization */
+
+ /* Set MAC IFG Gaps
+ * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
+ * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
+ */
+ ocelot_port_writel(ocelot_port, DEV_MAC_IFG_CFG_TX_IFG(5),
+ DEV_MAC_IFG_CFG);
+
+ /* Load seed (0) and set MAC HDX late collision */
+ ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
+ DEV_MAC_HDX_CFG_SEED_LOAD,
+ DEV_MAC_HDX_CFG);
+ mdelay(1);
+ ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
+ DEV_MAC_HDX_CFG);
+
+ /* Set Max Length and maximum tags allowed */
+ ocelot_port_set_mtu(ocelot, port, VLAN_ETH_FRAME_LEN);
+ ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
+ DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+ DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
+ DEV_MAC_TAGS_CFG);
+
+ /* Set SMAC of Pause frame (00:00:00:00:00:00) */
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG);
+
+ /* Drop frames with multicast source address */
+ ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
+ ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
+ ANA_PORT_DROP_CFG, port);
+
+ /* Set default VLAN and tag type to 8021Q. */
+ ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q),
+ REW_PORT_VLAN_CFG_PORT_TPID_M,
+ REW_PORT_VLAN_CFG, port);
+
+ /* Enable vcap lookups */
+ ocelot_vcap_enable(ocelot, port);
+}
+EXPORT_SYMBOL(ocelot_init_port);
+
int ocelot_probe_port(struct ocelot *ocelot, u8 port,
void __iomem *regs,
struct phy_device *phy)
{
+ struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
struct net_device *dev;
int err;
- dev = alloc_etherdev(sizeof(struct ocelot_port));
+ dev = alloc_etherdev(sizeof(struct ocelot_port_private));
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, ocelot->dev);
- ocelot_port = netdev_priv(dev);
- ocelot_port->dev = dev;
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+ priv->phy = phy;
+ priv->chip_port = port;
+ ocelot_port = &priv->port;
ocelot_port->ocelot = ocelot;
ocelot_port->regs = regs;
- ocelot_port->chip_port = port;
- ocelot_port->phy = phy;
ocelot->ports[port] = ocelot_port;
dev->netdev_ops = &ocelot_port_netdev_ops;
@@ -2032,33 +2215,81 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
ENTRYTYPE_LOCKED);
- INIT_LIST_HEAD(&ocelot_port->skbs);
+ ocelot_init_port(ocelot, port);
err = register_netdev(dev);
if (err) {
dev_err(ocelot->dev, "register_netdev failed\n");
- goto err_register_netdev;
+ free_netdev(dev);
}
- /* Basic L2 initialization */
- ocelot_vlan_port_apply(ocelot, ocelot_port);
+ return err;
+}
+EXPORT_SYMBOL(ocelot_probe_port);
- /* Enable vcap lookups */
- ocelot_vcap_enable(ocelot, ocelot_port);
+void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction)
+{
+ /* Configure and enable the CPU port. */
+ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
+ ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
+ ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
+ ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
+ ANA_PORT_PORT_CFG, cpu);
- return 0;
+ /* If the CPU port is a physical port, set up the port in Node
+ * Processor Interface (NPI) mode. This is the mode through which
+ * frames can be injected from and extracted to an external CPU.
+ * Only one port can be an NPI at the same time.
+ */
+ if (cpu < ocelot->num_phys_ports) {
+ int mtu = VLAN_ETH_FRAME_LEN + OCELOT_TAG_LEN;
-err_register_netdev:
- free_netdev(dev);
- return err;
+ ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
+ QSYS_EXT_CPU_CFG_EXT_CPU_PORT(cpu),
+ QSYS_EXT_CPU_CFG);
+
+ if (injection == OCELOT_TAG_PREFIX_SHORT)
+ mtu += OCELOT_SHORT_PREFIX_LEN;
+ else if (injection == OCELOT_TAG_PREFIX_LONG)
+ mtu += OCELOT_LONG_PREFIX_LEN;
+
+ ocelot_port_set_mtu(ocelot, cpu, mtu);
+ }
+
+ /* CPU port Injection/Extraction configuration */
+ ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, cpu);
+ ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(extraction) |
+ SYS_PORT_MODE_INCL_INJ_HDR(injection),
+ SYS_PORT_MODE, cpu);
+
+ /* Configure the CPU port to be VLAN aware */
+ ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
+ ANA_PORT_VLAN_CFG, cpu);
+
+ ocelot->cpu = cpu;
}
-EXPORT_SYMBOL(ocelot_probe_port);
+EXPORT_SYMBOL(ocelot_set_cpu_port);
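/* Editor's sketch (not part of the patch): the NPI-mode MTU budget that
 * ocelot_set_cpu_port() computes above. Constants are assumed to match
 * soc/mscc/ocelot.h at the time of this series (OCELOT_TAG_LEN 16,
 * short prefix 4, long prefix 16); VLAN_ETH_FRAME_LEN is 1518.
 */
#include <stdio.h>

int main(void)
{
	const int base = 1518 + 16;	/* VLAN frame + injection header */

	printf("no prefix:    %d\n", base);		/* 1534 */
	printf("short prefix: %d\n", base + 4);		/* 1538 */
	printf("long prefix:  %d\n", base + 16);	/* 1550 */
	return 0;
}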
int ocelot_init(struct ocelot *ocelot)
{
- u32 port;
- int i, ret, cpu = ocelot->num_phys_ports;
char queue_name[32];
+ int i, ret;
+ u32 port;
+
+ if (ocelot->ops->reset) {
+ ret = ocelot->ops->reset(ocelot);
+ if (ret) {
+ dev_err(ocelot->dev, "Switch reset failed\n");
+ return ret;
+ }
+ }
ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
sizeof(u32), GFP_KERNEL);
@@ -2080,6 +2311,7 @@ int ocelot_init(struct ocelot *ocelot)
if (!ocelot->stats_queue)
return -ENOMEM;
+ INIT_LIST_HEAD(&ocelot->multicast);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
ocelot_ace_init(ocelot);
@@ -2137,13 +2369,6 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
}
- /* Configure and enable the CPU port. */
- ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
- ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
- ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
- ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
- ANA_PORT_PORT_CFG, cpu);
-
/* Allow broadcast MAC frames. */
for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) {
u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
@@ -2156,13 +2381,6 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
- /* CPU port Injection/Extraction configuration */
- ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
- QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
- QSYS_SWITCH_PORT_MODE_PORT_ENA,
- QSYS_SWITCH_PORT_MODE, cpu);
- ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(1) |
- SYS_PORT_MODE_INCL_INJ_HDR(1), SYS_PORT_MODE, cpu);
/* Allow manual injection via DEVCPU_QS registers, and byte swap these
* registers endianness.
*/
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index e40773c01a44..32fef4f495aa 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -18,11 +18,12 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot.h>
#include "ocelot_ana.h"
#include "ocelot_dev.h"
#include "ocelot_qsys.h"
#include "ocelot_rew.h"
-#include "ocelot_sys.h"
#include "ocelot_qs.h"
#include "ocelot_tc.h"
#include "ocelot_ptp.h"
@@ -43,8 +44,6 @@
#define OCELOT_PTP_QUEUE_SZ 128
-#define IFH_LEN 4
-
struct frame_info {
u32 len;
u16 port;
@@ -54,372 +53,6 @@ struct frame_info {
u32 timestamp; /* rew_val */
};
-#define IFH_INJ_BYPASS BIT(31)
-#define IFH_INJ_POP_CNT_DISABLE (3 << 28)
-
-#define IFH_TAG_TYPE_C 0
-#define IFH_TAG_TYPE_S 1
-
-#define IFH_REW_OP_NOOP 0x0
-#define IFH_REW_OP_DSCP 0x1
-#define IFH_REW_OP_ONE_STEP_PTP 0x2
-#define IFH_REW_OP_TWO_STEP_PTP 0x3
-#define IFH_REW_OP_ORIGIN_PTP 0x5
-
-#define OCELOT_SPEED_2500 0
-#define OCELOT_SPEED_1000 1
-#define OCELOT_SPEED_100 2
-#define OCELOT_SPEED_10 3
-
-#define TARGET_OFFSET 24
-#define REG_MASK GENMASK(TARGET_OFFSET - 1, 0)
-#define REG(reg, offset) [reg & REG_MASK] = offset
-
-enum ocelot_target {
- ANA = 1,
- QS,
- QSYS,
- REW,
- SYS,
- S2,
- HSIO,
- PTP,
- TARGET_MAX,
-};
-
-enum ocelot_reg {
- ANA_ADVLEARN = ANA << TARGET_OFFSET,
- ANA_VLANMASK,
- ANA_PORT_B_DOMAIN,
- ANA_ANAGEFIL,
- ANA_ANEVENTS,
- ANA_STORMLIMIT_BURST,
- ANA_STORMLIMIT_CFG,
- ANA_ISOLATED_PORTS,
- ANA_COMMUNITY_PORTS,
- ANA_AUTOAGE,
- ANA_MACTOPTIONS,
- ANA_LEARNDISC,
- ANA_AGENCTRL,
- ANA_MIRRORPORTS,
- ANA_EMIRRORPORTS,
- ANA_FLOODING,
- ANA_FLOODING_IPMC,
- ANA_SFLOW_CFG,
- ANA_PORT_MODE,
- ANA_CUT_THRU_CFG,
- ANA_PGID_PGID,
- ANA_TABLES_ANMOVED,
- ANA_TABLES_MACHDATA,
- ANA_TABLES_MACLDATA,
- ANA_TABLES_STREAMDATA,
- ANA_TABLES_MACACCESS,
- ANA_TABLES_MACTINDX,
- ANA_TABLES_VLANACCESS,
- ANA_TABLES_VLANTIDX,
- ANA_TABLES_ISDXACCESS,
- ANA_TABLES_ISDXTIDX,
- ANA_TABLES_ENTRYLIM,
- ANA_TABLES_PTP_ID_HIGH,
- ANA_TABLES_PTP_ID_LOW,
- ANA_TABLES_STREAMACCESS,
- ANA_TABLES_STREAMTIDX,
- ANA_TABLES_SEQ_HISTORY,
- ANA_TABLES_SEQ_MASK,
- ANA_TABLES_SFID_MASK,
- ANA_TABLES_SFIDACCESS,
- ANA_TABLES_SFIDTIDX,
- ANA_MSTI_STATE,
- ANA_OAM_UPM_LM_CNT,
- ANA_SG_ACCESS_CTRL,
- ANA_SG_CONFIG_REG_1,
- ANA_SG_CONFIG_REG_2,
- ANA_SG_CONFIG_REG_3,
- ANA_SG_CONFIG_REG_4,
- ANA_SG_CONFIG_REG_5,
- ANA_SG_GCL_GS_CONFIG,
- ANA_SG_GCL_TI_CONFIG,
- ANA_SG_STATUS_REG_1,
- ANA_SG_STATUS_REG_2,
- ANA_SG_STATUS_REG_3,
- ANA_PORT_VLAN_CFG,
- ANA_PORT_DROP_CFG,
- ANA_PORT_QOS_CFG,
- ANA_PORT_VCAP_CFG,
- ANA_PORT_VCAP_S1_KEY_CFG,
- ANA_PORT_VCAP_S2_CFG,
- ANA_PORT_PCP_DEI_MAP,
- ANA_PORT_CPU_FWD_CFG,
- ANA_PORT_CPU_FWD_BPDU_CFG,
- ANA_PORT_CPU_FWD_GARP_CFG,
- ANA_PORT_CPU_FWD_CCM_CFG,
- ANA_PORT_PORT_CFG,
- ANA_PORT_POL_CFG,
- ANA_PORT_PTP_CFG,
- ANA_PORT_PTP_DLY1_CFG,
- ANA_PORT_PTP_DLY2_CFG,
- ANA_PORT_SFID_CFG,
- ANA_PFC_PFC_CFG,
- ANA_PFC_PFC_TIMER,
- ANA_IPT_OAM_MEP_CFG,
- ANA_IPT_IPT,
- ANA_PPT_PPT,
- ANA_FID_MAP_FID_MAP,
- ANA_AGGR_CFG,
- ANA_CPUQ_CFG,
- ANA_CPUQ_CFG2,
- ANA_CPUQ_8021_CFG,
- ANA_DSCP_CFG,
- ANA_DSCP_REWR_CFG,
- ANA_VCAP_RNG_TYPE_CFG,
- ANA_VCAP_RNG_VAL_CFG,
- ANA_VRAP_CFG,
- ANA_VRAP_HDR_DATA,
- ANA_VRAP_HDR_MASK,
- ANA_DISCARD_CFG,
- ANA_FID_CFG,
- ANA_POL_PIR_CFG,
- ANA_POL_CIR_CFG,
- ANA_POL_MODE_CFG,
- ANA_POL_PIR_STATE,
- ANA_POL_CIR_STATE,
- ANA_POL_STATE,
- ANA_POL_FLOWC,
- ANA_POL_HYST,
- ANA_POL_MISC_CFG,
- QS_XTR_GRP_CFG = QS << TARGET_OFFSET,
- QS_XTR_RD,
- QS_XTR_FRM_PRUNING,
- QS_XTR_FLUSH,
- QS_XTR_DATA_PRESENT,
- QS_XTR_CFG,
- QS_INJ_GRP_CFG,
- QS_INJ_WR,
- QS_INJ_CTRL,
- QS_INJ_STATUS,
- QS_INJ_ERR,
- QS_INH_DBG,
- QSYS_PORT_MODE = QSYS << TARGET_OFFSET,
- QSYS_SWITCH_PORT_MODE,
- QSYS_STAT_CNT_CFG,
- QSYS_EEE_CFG,
- QSYS_EEE_THRES,
- QSYS_IGR_NO_SHARING,
- QSYS_EGR_NO_SHARING,
- QSYS_SW_STATUS,
- QSYS_EXT_CPU_CFG,
- QSYS_PAD_CFG,
- QSYS_CPU_GROUP_MAP,
- QSYS_QMAP,
- QSYS_ISDX_SGRP,
- QSYS_TIMED_FRAME_ENTRY,
- QSYS_TFRM_MISC,
- QSYS_TFRM_PORT_DLY,
- QSYS_TFRM_TIMER_CFG_1,
- QSYS_TFRM_TIMER_CFG_2,
- QSYS_TFRM_TIMER_CFG_3,
- QSYS_TFRM_TIMER_CFG_4,
- QSYS_TFRM_TIMER_CFG_5,
- QSYS_TFRM_TIMER_CFG_6,
- QSYS_TFRM_TIMER_CFG_7,
- QSYS_TFRM_TIMER_CFG_8,
- QSYS_RED_PROFILE,
- QSYS_RES_QOS_MODE,
- QSYS_RES_CFG,
- QSYS_RES_STAT,
- QSYS_EGR_DROP_MODE,
- QSYS_EQ_CTRL,
- QSYS_EVENTS_CORE,
- QSYS_QMAXSDU_CFG_0,
- QSYS_QMAXSDU_CFG_1,
- QSYS_QMAXSDU_CFG_2,
- QSYS_QMAXSDU_CFG_3,
- QSYS_QMAXSDU_CFG_4,
- QSYS_QMAXSDU_CFG_5,
- QSYS_QMAXSDU_CFG_6,
- QSYS_QMAXSDU_CFG_7,
- QSYS_PREEMPTION_CFG,
- QSYS_CIR_CFG,
- QSYS_EIR_CFG,
- QSYS_SE_CFG,
- QSYS_SE_DWRR_CFG,
- QSYS_SE_CONNECT,
- QSYS_SE_DLB_SENSE,
- QSYS_CIR_STATE,
- QSYS_EIR_STATE,
- QSYS_SE_STATE,
- QSYS_HSCH_MISC_CFG,
- QSYS_TAG_CONFIG,
- QSYS_TAS_PARAM_CFG_CTRL,
- QSYS_PORT_MAX_SDU,
- QSYS_PARAM_CFG_REG_1,
- QSYS_PARAM_CFG_REG_2,
- QSYS_PARAM_CFG_REG_3,
- QSYS_PARAM_CFG_REG_4,
- QSYS_PARAM_CFG_REG_5,
- QSYS_GCL_CFG_REG_1,
- QSYS_GCL_CFG_REG_2,
- QSYS_PARAM_STATUS_REG_1,
- QSYS_PARAM_STATUS_REG_2,
- QSYS_PARAM_STATUS_REG_3,
- QSYS_PARAM_STATUS_REG_4,
- QSYS_PARAM_STATUS_REG_5,
- QSYS_PARAM_STATUS_REG_6,
- QSYS_PARAM_STATUS_REG_7,
- QSYS_PARAM_STATUS_REG_8,
- QSYS_PARAM_STATUS_REG_9,
- QSYS_GCL_STATUS_REG_1,
- QSYS_GCL_STATUS_REG_2,
- REW_PORT_VLAN_CFG = REW << TARGET_OFFSET,
- REW_TAG_CFG,
- REW_PORT_CFG,
- REW_DSCP_CFG,
- REW_PCP_DEI_QOS_MAP_CFG,
- REW_PTP_CFG,
- REW_PTP_DLY1_CFG,
- REW_RED_TAG_CFG,
- REW_DSCP_REMAP_DP1_CFG,
- REW_DSCP_REMAP_CFG,
- REW_STAT_CFG,
- REW_REW_STICKY,
- REW_PPT,
- SYS_COUNT_RX_OCTETS = SYS << TARGET_OFFSET,
- SYS_COUNT_RX_UNICAST,
- SYS_COUNT_RX_MULTICAST,
- SYS_COUNT_RX_BROADCAST,
- SYS_COUNT_RX_SHORTS,
- SYS_COUNT_RX_FRAGMENTS,
- SYS_COUNT_RX_JABBERS,
- SYS_COUNT_RX_CRC_ALIGN_ERRS,
- SYS_COUNT_RX_SYM_ERRS,
- SYS_COUNT_RX_64,
- SYS_COUNT_RX_65_127,
- SYS_COUNT_RX_128_255,
- SYS_COUNT_RX_256_1023,
- SYS_COUNT_RX_1024_1526,
- SYS_COUNT_RX_1527_MAX,
- SYS_COUNT_RX_PAUSE,
- SYS_COUNT_RX_CONTROL,
- SYS_COUNT_RX_LONGS,
- SYS_COUNT_RX_CLASSIFIED_DROPS,
- SYS_COUNT_TX_OCTETS,
- SYS_COUNT_TX_UNICAST,
- SYS_COUNT_TX_MULTICAST,
- SYS_COUNT_TX_BROADCAST,
- SYS_COUNT_TX_COLLISION,
- SYS_COUNT_TX_DROPS,
- SYS_COUNT_TX_PAUSE,
- SYS_COUNT_TX_64,
- SYS_COUNT_TX_65_127,
- SYS_COUNT_TX_128_511,
- SYS_COUNT_TX_512_1023,
- SYS_COUNT_TX_1024_1526,
- SYS_COUNT_TX_1527_MAX,
- SYS_COUNT_TX_AGING,
- SYS_RESET_CFG,
- SYS_SR_ETYPE_CFG,
- SYS_VLAN_ETYPE_CFG,
- SYS_PORT_MODE,
- SYS_FRONT_PORT_MODE,
- SYS_FRM_AGING,
- SYS_STAT_CFG,
- SYS_SW_STATUS,
- SYS_MISC_CFG,
- SYS_REW_MAC_HIGH_CFG,
- SYS_REW_MAC_LOW_CFG,
- SYS_TIMESTAMP_OFFSET,
- SYS_CMID,
- SYS_PAUSE_CFG,
- SYS_PAUSE_TOT_CFG,
- SYS_ATOP,
- SYS_ATOP_TOT_CFG,
- SYS_MAC_FC_CFG,
- SYS_MMGT,
- SYS_MMGT_FAST,
- SYS_EVENTS_DIF,
- SYS_EVENTS_CORE,
- SYS_CNT,
- SYS_PTP_STATUS,
- SYS_PTP_TXSTAMP,
- SYS_PTP_NXT,
- SYS_PTP_CFG,
- SYS_RAM_INIT,
- SYS_CM_ADDR,
- SYS_CM_DATA_WR,
- SYS_CM_DATA_RD,
- SYS_CM_OP,
- SYS_CM_DATA,
- S2_CORE_UPDATE_CTRL = S2 << TARGET_OFFSET,
- S2_CORE_MV_CFG,
- S2_CACHE_ENTRY_DAT,
- S2_CACHE_MASK_DAT,
- S2_CACHE_ACTION_DAT,
- S2_CACHE_CNT_DAT,
- S2_CACHE_TG_DAT,
- PTP_PIN_CFG = PTP << TARGET_OFFSET,
- PTP_PIN_TOD_SEC_MSB,
- PTP_PIN_TOD_SEC_LSB,
- PTP_PIN_TOD_NSEC,
- PTP_CFG_MISC,
- PTP_CLK_CFG_ADJ_CFG,
- PTP_CLK_CFG_ADJ_FREQ,
-};
-
-enum ocelot_regfield {
- ANA_ADVLEARN_VLAN_CHK,
- ANA_ADVLEARN_LEARN_MIRROR,
- ANA_ANEVENTS_FLOOD_DISCARD,
- ANA_ANEVENTS_MSTI_DROP,
- ANA_ANEVENTS_ACLKILL,
- ANA_ANEVENTS_ACLUSED,
- ANA_ANEVENTS_AUTOAGE,
- ANA_ANEVENTS_VS2TTL1,
- ANA_ANEVENTS_STORM_DROP,
- ANA_ANEVENTS_LEARN_DROP,
- ANA_ANEVENTS_AGED_ENTRY,
- ANA_ANEVENTS_CPU_LEARN_FAILED,
- ANA_ANEVENTS_AUTO_LEARN_FAILED,
- ANA_ANEVENTS_LEARN_REMOVE,
- ANA_ANEVENTS_AUTO_LEARNED,
- ANA_ANEVENTS_AUTO_MOVED,
- ANA_ANEVENTS_DROPPED,
- ANA_ANEVENTS_CLASSIFIED_DROP,
- ANA_ANEVENTS_CLASSIFIED_COPY,
- ANA_ANEVENTS_VLAN_DISCARD,
- ANA_ANEVENTS_FWD_DISCARD,
- ANA_ANEVENTS_MULTICAST_FLOOD,
- ANA_ANEVENTS_UNICAST_FLOOD,
- ANA_ANEVENTS_DEST_KNOWN,
- ANA_ANEVENTS_BUCKET3_MATCH,
- ANA_ANEVENTS_BUCKET2_MATCH,
- ANA_ANEVENTS_BUCKET1_MATCH,
- ANA_ANEVENTS_BUCKET0_MATCH,
- ANA_ANEVENTS_CPU_OPERATION,
- ANA_ANEVENTS_DMAC_LOOKUP,
- ANA_ANEVENTS_SMAC_LOOKUP,
- ANA_ANEVENTS_SEQ_GEN_ERR_0,
- ANA_ANEVENTS_SEQ_GEN_ERR_1,
- ANA_TABLES_MACACCESS_B_DOM,
- ANA_TABLES_MACTINDX_BUCKET,
- ANA_TABLES_MACTINDX_M_INDEX,
- QSYS_TIMED_FRAME_ENTRY_TFRM_VLD,
- QSYS_TIMED_FRAME_ENTRY_TFRM_FP,
- QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO,
- QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL,
- QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T,
- SYS_RESET_CFG_CORE_ENA,
- SYS_RESET_CFG_MEM_ENA,
- SYS_RESET_CFG_MEM_INIT,
- REGFIELD_MAX
-};
-
-enum ocelot_clk_pins {
- ALT_PPS_PIN = 1,
- EXT_CLK_PIN,
- ALT_LDST_PIN,
- TOD_ACC_PIN
-};
-
struct ocelot_multicast {
struct list_head list;
unsigned char addr[ETH_ALEN];
@@ -427,82 +60,18 @@ struct ocelot_multicast {
u16 ports;
};
-struct ocelot_port;
-
-struct ocelot_stat_layout {
- u32 offset;
- char name[ETH_GSTRING_LEN];
-};
-
-struct ocelot {
- struct device *dev;
-
- struct regmap *targets[TARGET_MAX];
- struct regmap_field *regfields[REGFIELD_MAX];
- const u32 *const *map;
- const struct ocelot_stat_layout *stats_layout;
- unsigned int num_stats;
-
- u8 base_mac[ETH_ALEN];
-
- struct net_device *hw_bridge_dev;
- u16 bridge_mask;
- u16 bridge_fwd_mask;
-
- struct workqueue_struct *ocelot_owq;
-
- int shared_queue_sz;
-
- u8 num_phys_ports;
- u8 num_cpu_ports;
- struct ocelot_port **ports;
-
- u32 *lags;
-
- /* Keep track of the vlan port masks */
- u32 vlan_mask[VLAN_N_VID];
-
- struct list_head multicast;
-
- /* Workqueue to check statistics for overflow with its lock */
- struct mutex stats_lock;
- u64 *stats;
- struct delayed_work stats_work;
- struct workqueue_struct *stats_queue;
-
- u8 ptp:1;
- struct ptp_clock *ptp_clock;
- struct ptp_clock_info ptp_info;
- struct hwtstamp_config hwtstamp_config;
- struct mutex ptp_lock; /* Protects the PTP interface state */
- spinlock_t ptp_clock_lock; /* Protects the PTP clock */
-};
-
-struct ocelot_port {
+struct ocelot_port_private {
+ struct ocelot_port port;
struct net_device *dev;
- struct ocelot *ocelot;
struct phy_device *phy;
- void __iomem *regs;
u8 chip_port;
- /* Ingress default VLAN (pvid) */
- u16 pvid;
-
- /* Egress default VLAN (vid) */
- u16 vid;
-
u8 vlan_aware;
- u64 *stats;
-
phy_interface_t phy_mode;
struct phy *serdes;
struct ocelot_port_tc tc;
-
- u8 ptp_cmd;
- struct list_head skbs;
- u8 ts_id;
};
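/* Editor's sketch (not part of the patch): why embedding struct
 * ocelot_port as the first member of ocelot_port_private works. The
 * common code passes around the embedded struct and the driver recovers
 * its wrapper with container_of(), as the xtr IRQ handler below does.
 * Minimal userspace re-implementation:
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ocelot_port { int dummy; };

struct ocelot_port_private {
	struct ocelot_port port;	/* library-visible part */
	const char *dev_name;		/* driver-private part */
};

int main(void)
{
	struct ocelot_port_private priv = { .dev_name = "swp0" };
	struct ocelot_port *p = &priv.port;	/* what the library sees */
	struct ocelot_port_private *back =
		container_of(p, struct ocelot_port_private, port);

	printf("%s\n", back->dev_name);		/* prints "swp0" */
	return 0;
}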
struct ocelot_skb {
@@ -511,49 +80,26 @@ struct ocelot_skb {
u8 id;
};
-u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
-#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
-#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
-#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0)
-
-void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
-#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi))
-#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
-#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0)
-
-void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 mask,
- u32 offset);
-#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
-#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
-#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0)
-
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
-int ocelot_regfields_init(struct ocelot *ocelot,
- const struct reg_field *const regfields);
-struct regmap *ocelot_io_platform_init(struct ocelot *ocelot,
- struct platform_device *pdev,
- const char *name);
-
#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val))
#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val))
-int ocelot_init(struct ocelot *ocelot);
-void ocelot_deinit(struct ocelot *ocelot);
-int ocelot_chip_init(struct ocelot *ocelot);
+int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops);
int ocelot_probe_port(struct ocelot *ocelot, u8 port,
void __iomem *regs,
struct phy_device *phy);
+void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction);
+
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
-int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
-void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts);
+#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val))
+#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val))
#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.h b/drivers/net/ethernet/mscc/ocelot_ace.h
index e98944c87259..c08e3e8482e7 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.h
+++ b/drivers/net/ethernet/mscc/ocelot_ace.h
@@ -224,9 +224,9 @@ int ocelot_ace_rule_stats_update(struct ocelot_ace_rule *rule);
int ocelot_ace_init(struct ocelot *ocelot);
void ocelot_ace_deinit(void);
-int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
+int ocelot_setup_tc_block_flower_bind(struct ocelot_port_private *priv,
struct flow_block_offload *f);
-void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
+void ocelot_setup_tc_block_flower_unbind(struct ocelot_port_private *priv,
struct flow_block_offload *f);
#endif /* _MSCC_OCELOT_ACE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index aac115136720..5541ec26f953 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -95,6 +95,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
do {
struct skb_shared_hwtstamps *shhwtstamps;
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
u64 tod_in_ns, full_ts_in_ns;
struct frame_info info = {};
struct net_device *dev;
@@ -103,7 +105,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
int sz, len, buf_len;
struct sk_buff *skb;
- for (i = 0; i < IFH_LEN; i++) {
+ for (i = 0; i < OCELOT_TAG_LEN / 4; i++) {
err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]);
if (err != 4)
break;
@@ -114,7 +116,10 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
ocelot_parse_ifh(ifh, &info);
- dev = ocelot->ports[info.port]->dev;
+ ocelot_port = ocelot->ports[info.port];
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+ dev = priv->dev;
skb = netdev_alloc_skb(dev, info.len);
@@ -249,6 +254,57 @@ static const struct of_device_id mscc_ocelot_match[] = {
};
MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
+static void ocelot_port_pcs_init(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ /* Disable HDX fast control */
+ ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
+ DEV_PORT_MISC);
+
+ /* SGMII only for now */
+ ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ PCS1G_MODE_CFG);
+ ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
+
+ /* Enable PCS */
+ ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
+
+ /* No aneg on SGMII */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
+
+ /* No loopback */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
+}
+
+static int ocelot_reset(struct ocelot *ocelot)
+{
+ int retries = 100;
+ u32 val;
+
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+
+ do {
+ msleep(1);
+ regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
+ &val);
+ } while (val && --retries);
+
+ if (!retries)
+ return -ETIMEDOUT;
+
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+
+ return 0;
+}
+
+static const struct ocelot_ops ocelot_ops = {
+ .pcs_init = ocelot_port_pcs_init,
+ .reset = ocelot_reset,
+};
+
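/* Editor's sketch (not part of the patch, untested): the bounded poll in
 * ocelot_reset() above expressed with regmap_field_read_poll_timeout(),
 * shown only to contrast with the open-coded msleep() loop.
 */
static int ocelot_reset_alt(struct ocelot *ocelot)
{
	u32 val;
	int err;

	regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
	regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);

	/* poll MEM_INIT until it self-clears; 1 ms period, 100 ms budget */
	err = regmap_field_read_poll_timeout(
		ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
		val, !val, 1000, 100 * 1000);
	if (err)
		return err;

	regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
	regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);

	return 0;
}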
static int mscc_ocelot_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -257,13 +313,12 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct ocelot *ocelot;
struct regmap *hsio;
unsigned int i;
- u32 val;
struct {
enum ocelot_target id;
char *name;
u8 optional:1;
- } res[] = {
+ } io_target[] = {
{ SYS, "sys" },
{ REW, "rew" },
{ QSYS, "qsys" },
@@ -283,20 +338,23 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ocelot);
ocelot->dev = &pdev->dev;
- for (i = 0; i < ARRAY_SIZE(res); i++) {
+ for (i = 0; i < ARRAY_SIZE(io_target); i++) {
struct regmap *target;
+ struct resource *res;
- target = ocelot_io_platform_init(ocelot, pdev, res[i].name);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ io_target[i].name);
+
+ target = ocelot_regmap_init(ocelot, res);
if (IS_ERR(target)) {
- if (res[i].optional) {
- ocelot->targets[res[i].id] = NULL;
+ if (io_target[i].optional) {
+ ocelot->targets[io_target[i].id] = NULL;
continue;
}
-
return PTR_ERR(target);
}
- ocelot->targets[res[i].id] = target;
+ ocelot->targets[io_target[i].id] = target;
}
hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
@@ -307,7 +365,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->targets[HSIO] = hsio;
- err = ocelot_chip_init(ocelot);
+ err = ocelot_chip_init(ocelot, &ocelot_ops);
if (err)
return err;
@@ -334,18 +392,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->ptp = 1;
}
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
-
- do {
- msleep(1);
- regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
- &val);
- } while (val);
-
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
-
ocelot->num_cpu_ports = 1; /* 1 port on the switch, two groups */
ports = of_get_child_by_name(np, "ethernet-ports");
@@ -359,17 +405,20 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports,
sizeof(struct ocelot_port *), GFP_KERNEL);
- INIT_LIST_HEAD(&ocelot->multicast);
ocelot_init(ocelot);
+ ocelot_set_cpu_port(ocelot, ocelot->num_phys_ports,
+ OCELOT_TAG_PREFIX_NONE, OCELOT_TAG_PREFIX_NONE);
for_each_available_child_of_node(ports, portnp) {
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
struct device_node *phy_node;
+ phy_interface_t phy_mode;
struct phy_device *phy;
struct resource *res;
struct phy *serdes;
void __iomem *regs;
char res_name[8];
- int phy_mode;
u32 port;
if (of_property_read_u32(portnp, "reg", &port))
@@ -398,13 +447,15 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
goto out_put_ports;
}
- phy_mode = of_get_phy_mode(portnp);
- if (phy_mode < 0)
- ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA;
- else
- ocelot->ports[port]->phy_mode = phy_mode;
+ ocelot_port = ocelot->ports[port];
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+
+ of_get_phy_mode(portnp, &phy_mode);
+
+ priv->phy_mode = phy_mode;
- switch (ocelot->ports[port]->phy_mode) {
+ switch (priv->phy_mode) {
case PHY_INTERFACE_MODE_NA:
continue;
case PHY_INTERFACE_MODE_SGMII:
@@ -413,7 +464,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
/* Ensure clock signals and speed is set on all
* QSGMII links
*/
- ocelot_port_writel(ocelot->ports[port],
+ ocelot_port_writel(ocelot_port,
DEV_CLOCK_CFG_LINK_SPEED
(OCELOT_SPEED_1000),
DEV_CLOCK_CFG);
@@ -441,7 +492,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
goto out_put_ports;
}
- ocelot->ports[port]->serdes = serdes;
+ priv->serdes = serdes;
}
register_netdevice_notifier(&ocelot_netdevice_nb);
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index b894bc0c9c16..3d65b99b9734 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -10,7 +10,7 @@
struct ocelot_port_block {
struct ocelot_acl_block *block;
- struct ocelot_port *port;
+ struct ocelot_port_private *priv;
};
static int ocelot_flower_parse_action(struct flow_cls_offload *f,
@@ -177,8 +177,8 @@ struct ocelot_ace_rule *ocelot_ace_rule_create(struct flow_cls_offload *f,
if (!rule)
return NULL;
- rule->port = block->port;
- rule->chip_port = block->port->chip_port;
+ rule->port = &block->priv->port;
+ rule->chip_port = block->priv->chip_port;
return rule;
}
@@ -202,7 +202,7 @@ static int ocelot_flower_replace(struct flow_cls_offload *f,
if (ret)
return ret;
- port_block->port->tc.offload_cnt++;
+ port_block->priv->tc.offload_cnt++;
return 0;
}
@@ -213,14 +213,14 @@ static int ocelot_flower_destroy(struct flow_cls_offload *f,
int ret;
rule.prio = f->common.prio;
- rule.port = port_block->port;
+ rule.port = &port_block->priv->port;
rule.id = f->cookie;
ret = ocelot_ace_rule_offload_del(&rule);
if (ret)
return ret;
- port_block->port->tc.offload_cnt--;
+ port_block->priv->tc.offload_cnt--;
return 0;
}
@@ -231,7 +231,7 @@ static int ocelot_flower_stats_update(struct flow_cls_offload *f,
int ret;
rule.prio = f->common.prio;
- rule.port = port_block->port;
+ rule.port = &port_block->priv->port;
rule.id = f->cookie;
ret = ocelot_ace_rule_stats_update(&rule);
if (ret)
@@ -261,7 +261,7 @@ static int ocelot_setup_tc_block_cb_flower(enum tc_setup_type type,
{
struct ocelot_port_block *port_block = cb_priv;
- if (!tc_cls_can_offload_and_chain0(port_block->port->dev, type_data))
+ if (!tc_cls_can_offload_and_chain0(port_block->priv->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -275,7 +275,7 @@ static int ocelot_setup_tc_block_cb_flower(enum tc_setup_type type,
}
static struct ocelot_port_block*
-ocelot_port_block_create(struct ocelot_port *port)
+ocelot_port_block_create(struct ocelot_port_private *priv)
{
struct ocelot_port_block *port_block;
@@ -283,7 +283,7 @@ ocelot_port_block_create(struct ocelot_port *port)
if (!port_block)
return NULL;
- port_block->port = port;
+ port_block->priv = priv;
return port_block;
}
@@ -300,7 +300,7 @@ static void ocelot_tc_block_unbind(void *cb_priv)
ocelot_port_block_destroy(port_block);
}
-int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
+int ocelot_setup_tc_block_flower_bind(struct ocelot_port_private *priv,
struct flow_block_offload *f)
{
struct ocelot_port_block *port_block;
@@ -311,14 +311,14 @@ int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
return -EOPNOTSUPP;
block_cb = flow_block_cb_lookup(f->block,
- ocelot_setup_tc_block_cb_flower, port);
+ ocelot_setup_tc_block_cb_flower, priv);
if (!block_cb) {
- port_block = ocelot_port_block_create(port);
+ port_block = ocelot_port_block_create(priv);
if (!port_block)
return -ENOMEM;
block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower,
- port, port_block,
+ priv, port_block,
ocelot_tc_block_unbind);
if (IS_ERR(block_cb)) {
ret = PTR_ERR(block_cb);
@@ -339,13 +339,13 @@ err_cb_register:
return ret;
}
-void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
+void ocelot_setup_tc_block_flower_unbind(struct ocelot_port_private *priv,
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
block_cb = flow_block_cb_lookup(f->block,
- ocelot_setup_tc_block_cb_flower, port);
+ ocelot_setup_tc_block_cb_flower, priv);
if (!block_cb)
return;
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
index c6db8ad31fdf..b229b1cb68ef 100644
--- a/drivers/net/ethernet/mscc/ocelot_io.c
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -97,20 +97,16 @@ static struct regmap_config ocelot_regmap_config = {
.reg_stride = 4,
};
-struct regmap *ocelot_io_platform_init(struct ocelot *ocelot,
- struct platform_device *pdev,
- const char *name)
+struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res)
{
- struct resource *res;
void __iomem *regs;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
regs = devm_ioremap_resource(ocelot->dev, res);
if (IS_ERR(regs))
return ERR_CAST(regs);
- ocelot_regmap_config.name = name;
- return devm_regmap_init_mmio(ocelot->dev, regs,
- &ocelot_regmap_config);
+ ocelot_regmap_config.name = res->name;
+
+ return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config);
}
-EXPORT_SYMBOL(ocelot_io_platform_init);
+EXPORT_SYMBOL(ocelot_regmap_init);
diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c
index 701e82dd749a..faddce43f2e3 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.c
+++ b/drivers/net/ethernet/mscc/ocelot_police.c
@@ -40,13 +40,12 @@ struct qos_policer_conf {
u8 ipg; /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */
};
-static int qos_policer_conf_set(struct ocelot_port *port, u32 pol_ix,
+static int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
struct qos_policer_conf *conf)
{
u32 cf = 0, cir_ena = 0, frm_mode = POL_MODE_LINERATE;
u32 cir = 0, cbs = 0, pir = 0, pbs = 0;
bool cir_discard = 0, pir_discard = 0;
- struct ocelot *ocelot = port->ocelot;
u32 pbs_max = 0, cbs_max = 0;
u8 ipg = 20;
u32 value;
@@ -123,22 +122,26 @@ static int qos_policer_conf_set(struct ocelot_port *port, u32 pol_ix,
/* Check limits */
if (pir > GENMASK(15, 0)) {
- netdev_err(port->dev, "Invalid pir\n");
+ dev_err(ocelot->dev, "Invalid pir for port %d: %u (max %lu)\n",
+ port, pir, GENMASK(15, 0));
return -EINVAL;
}
if (cir > GENMASK(15, 0)) {
- netdev_err(port->dev, "Invalid cir\n");
+ dev_err(ocelot->dev, "Invalid cir for port %d: %u (max %lu)\n",
+ port, cir, GENMASK(15, 0));
return -EINVAL;
}
if (pbs > pbs_max) {
- netdev_err(port->dev, "Invalid pbs\n");
+ dev_err(ocelot->dev, "Invalid pbs for port %d: %u (max %u)\n",
+ port, pbs, pbs_max);
return -EINVAL;
}
if (cbs > cbs_max) {
- netdev_err(port->dev, "Invalid cbs\n");
+ dev_err(ocelot->dev, "Invalid cbs for port %d: %u (max %u)\n",
+ port, cbs, cbs_max);
return -EINVAL;
}
@@ -171,10 +174,9 @@ static int qos_policer_conf_set(struct ocelot_port *port, u32 pol_ix,
return 0;
}
-int ocelot_port_policer_add(struct ocelot_port *port,
+int ocelot_port_policer_add(struct ocelot *ocelot, int port,
struct ocelot_policer *pol)
{
- struct ocelot *ocelot = port->ocelot;
struct qos_policer_conf pp = { 0 };
int err;
@@ -185,11 +187,10 @@ int ocelot_port_policer_add(struct ocelot_port *port,
pp.pir = pol->rate;
pp.pbs = pol->burst;
- netdev_dbg(port->dev,
- "%s: port %u pir %u kbps, pbs %u bytes\n",
- __func__, port->chip_port, pp.pir, pp.pbs);
+ dev_dbg(ocelot->dev, "%s: port %u pir %u kbps, pbs %u bytes\n",
+ __func__, port, pp.pir, pp.pbs);
- err = qos_policer_conf_set(port, POL_IX_PORT + port->chip_port, &pp);
+ err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp);
if (err)
return err;
@@ -198,22 +199,21 @@ int ocelot_port_policer_add(struct ocelot_port *port,
ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER),
ANA_PORT_POL_CFG_PORT_POL_ENA |
ANA_PORT_POL_CFG_POL_ORDER_M,
- ANA_PORT_POL_CFG, port->chip_port);
+ ANA_PORT_POL_CFG, port);
return 0;
}
-int ocelot_port_policer_del(struct ocelot_port *port)
+int ocelot_port_policer_del(struct ocelot *ocelot, int port)
{
- struct ocelot *ocelot = port->ocelot;
struct qos_policer_conf pp = { 0 };
int err;
- netdev_dbg(port->dev, "%s: port %u\n", __func__, port->chip_port);
+ dev_dbg(ocelot->dev, "%s: port %u\n", __func__, port);
pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
- err = qos_policer_conf_set(port, POL_IX_PORT + port->chip_port, &pp);
+ err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp);
if (err)
return err;
@@ -221,7 +221,7 @@ int ocelot_port_policer_del(struct ocelot_port *port)
ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER),
ANA_PORT_POL_CFG_PORT_POL_ENA |
ANA_PORT_POL_CFG_POL_ORDER_M,
- ANA_PORT_POL_CFG, port->chip_port);
+ ANA_PORT_POL_CFG, port);
return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h
index d1137f79efda..ae9509229463 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.h
+++ b/drivers/net/ethernet/mscc/ocelot_police.h
@@ -14,9 +14,9 @@ struct ocelot_policer {
u32 burst; /* bytes */
};
-int ocelot_port_policer_add(struct ocelot_port *port,
+int ocelot_port_policer_add(struct ocelot *ocelot, int port,
struct ocelot_policer *pol);
-int ocelot_port_policer_del(struct ocelot_port *port);
+int ocelot_port_policer_del(struct ocelot *ocelot, int port);
#endif /* _MSCC_OCELOT_POLICE_H_ */
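/* Editor's sketch (not part of the patch): with the policer API now
 * addressed by (ocelot, port) instead of a netdev-backed ocelot_port, a
 * caller with no net_device (e.g. a DSA front end) could do the
 * following. Units follow the ocelot_policer definition above (burst in
 * bytes; rate is in kbps per the header's elided comment).
 */
static int example_rate_limit(struct ocelot *ocelot, int port)
{
	struct ocelot_policer pol = {
		.rate  = 100000,	/* kbps */
		.burst = 65535,		/* bytes */
	};

	return ocelot_port_policer_add(ocelot, port, &pol);
}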
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index e59977d20400..b88b5899b227 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -423,7 +423,7 @@ static void ocelot_pll5_init(struct ocelot *ocelot)
HSIO_PLL5G_CFG2_AMPC_SEL(0x10));
}
-int ocelot_chip_init(struct ocelot *ocelot)
+int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
{
int ret;
@@ -431,6 +431,7 @@ int ocelot_chip_init(struct ocelot *ocelot)
ocelot->stats_layout = ocelot_stats_layout;
ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
ocelot->shared_queue_sz = 224 * 1024;
+ ocelot->ops = ops;
ret = ocelot_regfields_init(ocelot, ocelot_regfields);
if (ret)
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
index 16a6db71ca5e..a4f7fbd76507 100644
--- a/drivers/net/ethernet/mscc/ocelot_tc.c
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -9,17 +9,19 @@
#include "ocelot_ace.h"
#include <net/pkt_cls.h>
-static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port,
+static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
struct tc_cls_matchall_offload *f,
bool ingress)
{
struct netlink_ext_ack *extack = f->common.extack;
+ struct ocelot *ocelot = priv->port.ocelot;
struct ocelot_policer pol = { 0 };
struct flow_action_entry *action;
+ int port = priv->chip_port;
int err;
- netdev_dbg(port->dev, "%s: port %u command %d cookie %lu\n",
- __func__, port->chip_port, f->command, f->cookie);
+ netdev_dbg(priv->dev, "%s: port %u command %d cookie %lu\n",
+ __func__, port, f->command, f->cookie);
if (!ingress) {
NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported");
@@ -34,7 +36,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port,
return -EOPNOTSUPP;
}
- if (port->tc.block_shared) {
+ if (priv->tc.block_shared) {
NL_SET_ERR_MSG_MOD(extack,
"Rate limit is not supported on shared blocks");
return -EOPNOTSUPP;
@@ -47,7 +49,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port,
return -EOPNOTSUPP;
}
- if (port->tc.police_id && port->tc.police_id != f->cookie) {
+ if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
NL_SET_ERR_MSG_MOD(extack,
"Only one policer per port is supported\n");
return -EEXIST;
@@ -58,27 +60,27 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port,
PSCHED_NS2TICKS(action->police.burst),
PSCHED_TICKS_PER_SEC);
- err = ocelot_port_policer_add(port, &pol);
+ err = ocelot_port_policer_add(ocelot, port, &pol);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Could not add policer\n");
return err;
}
- port->tc.police_id = f->cookie;
- port->tc.offload_cnt++;
+ priv->tc.police_id = f->cookie;
+ priv->tc.offload_cnt++;
return 0;
case TC_CLSMATCHALL_DESTROY:
- if (port->tc.police_id != f->cookie)
+ if (priv->tc.police_id != f->cookie)
return -ENOENT;
- err = ocelot_port_policer_del(port);
+ err = ocelot_port_policer_del(ocelot, port);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Could not delete policer\n");
return err;
}
- port->tc.police_id = 0;
- port->tc.offload_cnt--;
+ priv->tc.police_id = 0;
+ priv->tc.offload_cnt--;
return 0;
case TC_CLSMATCHALL_STATS: /* fall through */
default:
@@ -90,21 +92,21 @@ static int ocelot_setup_tc_block_cb(enum tc_setup_type type,
void *type_data,
void *cb_priv, bool ingress)
{
- struct ocelot_port *port = cb_priv;
+ struct ocelot_port_private *priv = cb_priv;
- if (!tc_cls_can_offload_and_chain0(port->dev, type_data))
+ if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSMATCHALL:
- netdev_dbg(port->dev, "tc_block_cb: TC_SETUP_CLSMATCHALL %s\n",
+ netdev_dbg(priv->dev, "tc_block_cb: TC_SETUP_CLSMATCHALL %s\n",
ingress ? "ingress" : "egress");
- return ocelot_setup_tc_cls_matchall(port, type_data, ingress);
+ return ocelot_setup_tc_cls_matchall(priv, type_data, ingress);
case TC_SETUP_CLSFLOWER:
return 0;
default:
- netdev_dbg(port->dev, "tc_block_cb: type %d %s\n",
+ netdev_dbg(priv->dev, "tc_block_cb: type %d %s\n",
type,
ingress ? "ingress" : "egress");
@@ -130,19 +132,19 @@ static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type,
static LIST_HEAD(ocelot_block_cb_list);
-static int ocelot_setup_tc_block(struct ocelot_port *port,
+static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
int err;
- netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n",
+ netdev_dbg(priv->dev, "tc_block command %d, binder_type %d\n",
f->command, f->binder_type);
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
cb = ocelot_setup_tc_block_cb_ig;
- port->tc.block_shared = f->block_shared;
+ priv->tc.block_shared = f->block_shared;
} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
cb = ocelot_setup_tc_block_cb_eg;
} else {
@@ -153,14 +155,14 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
switch (f->command) {
case FLOW_BLOCK_BIND:
- if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list))
+ if (flow_block_cb_is_busy(cb, priv, &ocelot_block_cb_list))
return -EBUSY;
- block_cb = flow_block_cb_alloc(cb, port, port, NULL);
+ block_cb = flow_block_cb_alloc(cb, priv, priv, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
- err = ocelot_setup_tc_block_flower_bind(port, f);
+ err = ocelot_setup_tc_block_flower_bind(priv, f);
if (err < 0) {
flow_block_cb_free(block_cb);
return err;
@@ -169,11 +171,11 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
list_add_tail(&block_cb->driver_list, f->driver_block_list);
return 0;
case FLOW_BLOCK_UNBIND:
- block_cb = flow_block_cb_lookup(f->block, cb, port);
+ block_cb = flow_block_cb_lookup(f->block, cb, priv);
if (!block_cb)
return -ENOENT;
- ocelot_setup_tc_block_flower_unbind(port, f);
+ ocelot_setup_tc_block_flower_unbind(priv, f);
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
@@ -185,11 +187,11 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
switch (type) {
case TC_SETUP_BLOCK:
- return ocelot_setup_tc_block(port, type_data);
+ return ocelot_setup_tc_block(priv, type_data);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 2761f3a3ae50..49c7987c2abd 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1346,10 +1346,9 @@ static int nixge_probe(struct platform_device *pdev)
}
}
- priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if ((int)priv->phy_mode < 0) {
+ err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
+ if (err) {
netdev_err(ndev, "not find \"phy-mode\" property\n");
- err = -EINVAL;
goto unregister_mdio;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 05d2b478c99b..6b54cb3b681d 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2225,6 +2225,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct nv_skb_map *prev_tx_ctx;
struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
unsigned long flags;
+ netdev_tx_t ret = NETDEV_TX_OK;
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
@@ -2240,7 +2241,12 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
np->tx_stop = 1;
spin_unlock_irqrestore(&np->lock, flags);
- return NETDEV_TX_BUSY;
+
+ /* When normal packets and/or xmit_more packets fill up
+ * tx_desc, it is necessary to trigger NIC tx reg.
+ */
+ ret = NETDEV_TX_BUSY;
+ goto txkick;
}
spin_unlock_irqrestore(&np->lock, flags);
@@ -2259,7 +2265,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
- return NETDEV_TX_OK;
+
+ ret = NETDEV_TX_OK;
+
+ goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 1;
@@ -2305,7 +2314,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
- return NETDEV_TX_OK;
+
+ ret = NETDEV_TX_OK;
+
+ goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
@@ -2357,8 +2369,15 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&np->lock, flags);
- writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
- return NETDEV_TX_OK;
+txkick:
+ if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
+ u32 txrxctl_kick;
+dma_error:
+ txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
+ writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
+ }
+
+ return ret;
}
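/* Editor's sketch (not part of the patch): the shape of the xmit_more
 * batching introduced above. The doorbell write is deferred while the
 * stack promises more frames, but must still fire on the BUSY path or
 * already-queued descriptors would sit unkicked until the next xmit.
 * All example_* names and KICK_REG are illustrative.
 */
#define KICK_REG 0x1

struct example_priv {
	void __iomem *doorbell;
};

/* provided elsewhere; returns false when the TX ring is full */
static bool example_queue_desc(struct example_priv *priv, struct sk_buff *skb);

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	netdev_tx_t ret = NETDEV_TX_OK;

	if (!example_queue_desc(priv, skb)) {
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;	/* ring full: still kick below */
	}

	if (netif_queue_stopped(dev) || !netdev_xmit_more())
		writel(KICK_REG, priv->doorbell);

	return ret;
}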
static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
@@ -2381,6 +2400,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
struct nv_skb_map *start_tx_ctx = NULL;
struct nv_skb_map *tmp_tx_ctx = NULL;
unsigned long flags;
+ netdev_tx_t ret = NETDEV_TX_OK;
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
@@ -2396,7 +2416,13 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
netif_stop_queue(dev);
np->tx_stop = 1;
spin_unlock_irqrestore(&np->lock, flags);
- return NETDEV_TX_BUSY;
+
+ /* When normal packets and/or xmit_more packets fill up
+ * tx_desc, it is necessary to trigger NIC tx reg.
+ */
+ ret = NETDEV_TX_BUSY;
+
+ goto txkick;
}
spin_unlock_irqrestore(&np->lock, flags);
@@ -2416,7 +2442,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
- return NETDEV_TX_OK;
+
+ ret = NETDEV_TX_OK;
+
+ goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 1;
@@ -2463,7 +2492,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
- return NETDEV_TX_OK;
+
+ ret = NETDEV_TX_OK;
+
+ goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 0;
@@ -2542,8 +2574,15 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
spin_unlock_irqrestore(&np->lock, flags);
- writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
- return NETDEV_TX_OK;
+txkick:
+ if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
+ u32 txrxctl_kick;
+dma_error:
+ txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
+ writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
+ }
+
+ return ret;
}
static inline void nv_tx_flip_ownership(struct net_device *dev)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 1e26964fe4e9..481b096e984d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1208,8 +1208,16 @@ enum qede_remove_mode {
static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
struct net_device *ndev = pci_get_drvdata(pdev);
- struct qede_dev *edev = netdev_priv(ndev);
- struct qed_dev *cdev = edev->cdev;
+ struct qede_dev *edev;
+ struct qed_dev *cdev;
+
+ if (!ndev) {
+ dev_info(&pdev->dev, "Device has already been removed\n");
+ return;
+ }
+
+ edev = netdev_priv(ndev);
+ cdev = edev->cdev;
DP_INFO(edev, "Starting qede_remove\n");
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index c84ab052ef26..98f92268cbaa 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -213,9 +213,9 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu)
{
struct emac_adapter *adpt = netdev_priv(netdev);
- netif_info(adpt, hw, adpt->netdev,
- "changing MTU from %d to %d\n", netdev->mtu,
- new_mtu);
+ netif_dbg(adpt, hw, adpt->netdev,
+ "changing MTU from %d to %d\n", netdev->mtu,
+ new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 9c54b715228e..06de59521fc4 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -57,10 +57,10 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
if (port->nr_rmnet_devs)
return -EINVAL;
- kfree(port);
-
netdev_rx_handler_unregister(real_dev);
+ kfree(port);
+
/* release reference on real_dev */
dev_put(real_dev);
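/* Editor's note (not part of the patch): the reordering above closes a
 * use-after-free window. netdev_rx_handler_unregister() synchronizes
 * against receive-path readers of the handler data ('port'), so the
 * memory may only be freed once it has returned:
 *
 *	netdev_rx_handler_unregister(real_dev);	// no readers remain
 *	kfree(port);				// now safe
 *
 * Freeing first, as the old code did, let a concurrent receive path
 * dereference freed memory.
 */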
diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c
index 8f54a2c832eb..355cc810e322 100644
--- a/drivers/net/ethernet/realtek/r8169_firmware.c
+++ b/drivers/net/ethernet/realtek/r8169_firmware.c
@@ -37,7 +37,7 @@ struct fw_info {
u8 chksum;
} __packed;
-#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
+#define FW_OPCODE_SIZE FIELD_SIZEOF(struct rtl_fw_phy_action, code[0])
static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw)
{
@@ -92,19 +92,24 @@ static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw)
for (index = 0; index < pa->size; index++) {
u32 action = le32_to_cpu(pa->code[index]);
+ u32 val = action & 0x0000ffff;
u32 regno = (action & 0x0fff0000) >> 16;
switch (action >> 28) {
case PHY_READ:
case PHY_DATA_OR:
case PHY_DATA_AND:
- case PHY_MDIO_CHG:
case PHY_CLEAR_READCOUNT:
case PHY_WRITE:
case PHY_WRITE_PREVIOUS:
case PHY_DELAY_MS:
break;
+ case PHY_MDIO_CHG:
+ if (val > 1)
+ goto out;
+ break;
+
case PHY_BJMPN:
if (regno > index)
goto out;
@@ -164,12 +169,12 @@ void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
index -= (regno + 1);
break;
case PHY_MDIO_CHG:
- if (data == 0) {
- fw_write = rtl_fw->phy_write;
- fw_read = rtl_fw->phy_read;
- } else if (data == 1) {
+ if (data) {
fw_write = rtl_fw->mac_mcu_write;
fw_read = rtl_fw->mac_mcu_read;
+ } else {
+ fw_write = rtl_fw->phy_write;
+ fw_read = rtl_fw->phy_read;
}
break;
@@ -198,7 +203,7 @@ void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
index += regno;
break;
case PHY_DELAY_MS:
- mdelay(data);
+ msleep(data);
break;
}
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 0704f8bd1df4..d8fcdb9db8d1 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -52,6 +52,7 @@
#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw"
#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw"
#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw"
+#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw"
#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
@@ -135,6 +136,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_49,
RTL_GIGA_MAC_VER_50,
RTL_GIGA_MAC_VER_51,
+ RTL_GIGA_MAC_VER_52,
RTL_GIGA_MAC_VER_60,
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_NONE
@@ -202,6 +204,7 @@ static const struct {
[RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
+ [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3},
[RTL_GIGA_MAC_VER_60] = {"RTL8125" },
[RTL_GIGA_MAC_VER_61] = {"RTL8125", FIRMWARE_8125A_3},
};
@@ -680,6 +683,7 @@ struct rtl8169_private {
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
+ int eee_adv;
const char *fw_name;
struct rtl_fw *rtl_fw;
@@ -712,6 +716,7 @@ MODULE_FIRMWARE(FIRMWARE_8168G_2);
MODULE_FIRMWARE(FIRMWARE_8168G_3);
MODULE_FIRMWARE(FIRMWARE_8168H_1);
MODULE_FIRMWARE(FIRMWARE_8168H_2);
+MODULE_FIRMWARE(FIRMWARE_8168FP_3);
MODULE_FIRMWARE(FIRMWARE_8107E_1);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
@@ -750,7 +755,7 @@ static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
tp->mac_version != RTL_GIGA_MAC_VER_39 &&
- tp->mac_version <= RTL_GIGA_MAC_VER_51;
+ tp->mac_version <= RTL_GIGA_MAC_VER_52;
}
static bool rtl_supports_eee(struct rtl8169_private *tp)
@@ -910,6 +915,9 @@ static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
{
+ if (reg == 0x1f)
+ return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4;
+
if (tp->ocp_base != OCP_STD_PHY_BASE)
reg -= 0x10;
@@ -1083,6 +1091,39 @@ static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
rtl_writephy(tp, reg_addr, (val & ~m) | p);
}
+static void r8168d_modify_extpage(struct phy_device *phydev, int extpage,
+ int reg, u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0007);
+
+ __phy_write(phydev, 0x1e, extpage);
+ __phy_modify(phydev, reg, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
+static void r8168d_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0005);
+
+ __phy_write(phydev, 0x05, parm);
+ __phy_modify(phydev, 0x06, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
+static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0a43);
+
+ __phy_write(phydev, 0x13, parm);
+ __phy_modify(phydev, 0x14, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
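/* Editor's note (not part of the patch): what the three helpers above
 * factor out. phy_select_page()/phy_restore_page() bracket a paged
 * access with the PHY lock held, so e.g. the open-coded sequence
 * removed from rtl8168f_config_eee_phy() later in this diff:
 *
 *	phy_write(phydev, 0x1f, 0x0007);	// select extension pages
 *	phy_write(phydev, 0x1e, 0x0020);	// extension page 0x0020
 *	phy_set_bits(phydev, 0x15, BIT(8));
 *	phy_write(phydev, 0x1f, 0x0000);	// back to page 0
 *
 * collapses to r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8)).
 */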
DECLARE_RTL_COND(rtl_ephyar_cond)
{
return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
@@ -1253,9 +1294,7 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_31:
rtl8168dp_driver_start(tp);
break;
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_driver_start(tp);
break;
default:
@@ -1287,9 +1326,7 @@ static void rtl8168_driver_stop(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_31:
rtl8168dp_driver_stop(tp);
break;
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_driver_stop(tp);
break;
default:
@@ -1317,9 +1354,7 @@ static bool r8168_check_dash(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_check_dash(tp);
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
return r8168ep_check_dash(tp);
default:
return false;
@@ -1494,7 +1529,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_52:
options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
if (wolopts)
options |= PME_SIGNAL;
@@ -2065,6 +2100,10 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
}
ret = phy_ethtool_set_eee(tp->phydev, data);
+
+ if (!ret)
+ tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN,
+ MDIO_AN_EEE_ADV);
out:
pm_runtime_put_noidle(d);
return ret;
@@ -2095,10 +2134,16 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
static void rtl_enable_eee(struct rtl8169_private *tp)
{
struct phy_device *phydev = tp->phydev;
- int supported = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
+ int adv;
- if (supported > 0)
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, supported);
+ /* respect EEE advertisement the user may have set */
+ if (tp->eee_adv >= 0)
+ adv = tp->eee_adv;
+ else
+ adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
+
+ if (adv >= 0)
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
}
static void rtl8169_get_mac_version(struct rtl8169_private *tp)
@@ -2123,6 +2168,9 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{ 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
{ 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
+ /* RTL8117 */
+ { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 },
+
/* 8168EP family. */
{ 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
{ 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
@@ -2251,14 +2299,6 @@ static void rtl_apply_firmware(struct rtl8169_private *tp)
rtl_fw_write_firmware(tp, tp->rtl_fw);
}
-static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
-{
- if (rtl_readphy(tp, reg) != val)
- netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
- else
- rtl_apply_firmware(tp);
-}
-
static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
{
/* Adjust EEE LED frequency */
@@ -2278,15 +2318,8 @@ static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
{
struct phy_device *phydev = tp->phydev;
- phy_write(phydev, 0x1f, 0x0007);
- phy_write(phydev, 0x1e, 0x0020);
- phy_set_bits(phydev, 0x15, BIT(8));
-
- phy_write(phydev, 0x1f, 0x0005);
- phy_write(phydev, 0x05, 0x8b85);
- phy_set_bits(phydev, 0x06, BIT(13));
-
- phy_write(phydev, 0x1f, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8));
+ r8168d_phy_param(phydev, 0x8b85, 0, BIT(13));
}
static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
@@ -2383,13 +2416,7 @@ static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x01, 0x90d0 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0002, 0x01, 0x90d0);
}
static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
@@ -2400,9 +2427,7 @@ static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
(pdev->subsystem_device != 0xe000))
return;
- rtl_writephy(tp, 0x1f, 0x0001);
- rtl_writephy(tp, 0x10, 0xf01b);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
}
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
@@ -2507,54 +2532,28 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x10, 0xf41b },
- { 0x1f, 0x0000 }
- };
-
rtl_writephy(tp, 0x1f, 0x0001);
rtl_patchphy(tp, 0x16, 1 << 0);
-
- rtl_writephy_batch(tp, phy_reg_init);
+ rtl_writephy(tp, 0x10, 0xf41b);
+ rtl_writephy(tp, 0x1f, 0x0000);
}
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x10, 0xf41b },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf41b);
}
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0000 },
- { 0x1d, 0x0f00 },
- { 0x1f, 0x0002 },
- { 0x0c, 0x1ec8 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write(tp->phydev, 0x1d, 0x0f00);
+ phy_write_paged(tp->phydev, 0x0002, 0x0c, 0x1ec8);
}
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x1d, 0x3d98 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_patchphy(tp, 0x14, 1 << 5);
- rtl_patchphy(tp, 0x0d, 1 << 5);
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_set_bits(tp->phydev, 0x14, BIT(5));
+ phy_set_bits(tp->phydev, 0x0d, BIT(5));
+ phy_write_paged(tp->phydev, 0x0001, 0x1d, 0x3d98);
}
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
@@ -2636,11 +2635,6 @@ static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
}
-static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl8168c_3_hw_phy_config(tp);
-}
-
static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
/* Channel Estimation */
{ 0x1f, 0x0001 },
@@ -2691,6 +2685,21 @@ static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
{ 0x1f, 0x0002 }
};
+static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, u16 val)
+{
+ u16 reg_val;
+
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x001b);
+ reg_val = rtl_readphy(tp, 0x06);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ if (reg_val != val)
+ netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
+ else
+ rtl_apply_firmware(tp);
+}
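What the new helper encodes: 0x001b is an indirect PHY parameter (selector written to reg 0x05 on page 5, data read back from reg 0x06) that apparently carries a revision the firmware must match. The removed rtl_apply_firmware_cond() read whichever plain register the caller passed (MII_EXPANSION, i.e. 0x06) and only worked because the caller had already left page 5 / parameter 0x001b selected; the replacement keeps the whole select-read-compare sequence in one place.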
+
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
@@ -2724,15 +2733,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x0d, val | set[i]);
}
} else {
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x6662 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x6662 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0002, 0x05, 0x6662);
+ r8168d_phy_param(tp->phydev, 0x8330, 0xffff, 0x6662);
}
/* RSET couple improve */
@@ -2744,13 +2746,9 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0002);
rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);
-
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x001b);
-
- rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
-
rtl_writephy(tp, 0x1f, 0x0000);
+
+ rtl8168d_apply_firmware_cond(tp, 0xbf00);
}
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
@@ -2777,15 +2775,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x0d, val | set[i]);
}
} else {
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x2642 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x2642 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0002, 0x05, 0x2642);
+ r8168d_phy_param(tp->phydev, 0x8330, 0xffff, 0x2642);
}
/* Fine tune PLL performance */
@@ -2796,13 +2787,9 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
/* Switching regulator Slew rate */
rtl_writephy(tp, 0x1f, 0x0002);
rtl_patchphy(tp, 0x0f, 0x0017);
-
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x001b);
-
- rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
-
rtl_writephy(tp, 0x1f, 0x0000);
+
+ rtl8168d_apply_firmware_cond(tp, 0xb300);
}
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
@@ -2856,41 +2843,23 @@ static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{ 0x04, 0xf800 },
{ 0x04, 0xf000 },
{ 0x1f, 0x0000 },
-
- { 0x1f, 0x0007 },
- { 0x1e, 0x0023 },
- { 0x16, 0x0000 },
- { 0x1f, 0x0000 }
};
rtl_writephy_batch(tp, phy_reg_init);
+
+ r8168d_modify_extpage(tp->phydev, 0x0023, 0x16, 0xffff, 0x0000);
}
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
-
- { 0x1f, 0x0007 },
- { 0x1e, 0x002d },
- { 0x18, 0x0040 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
- rtl_patchphy(tp, 0x0d, 1 << 5);
+ phy_write_paged(tp->phydev, 0x0001, 0x17, 0x0cc0);
+ r8168d_modify_extpage(tp->phydev, 0x002d, 0x18, 0xffff, 0x0040);
+ phy_set_bits(tp->phydev, 0x0d, BIT(5));
}
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
- /* Enable Delay cap */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b80 },
- { 0x06, 0xc896 },
- { 0x1f, 0x0000 },
-
/* Channel estimation fine tune */
{ 0x1f, 0x0001 },
{ 0x0b, 0x6c20 },
@@ -2899,60 +2868,38 @@ static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0003 },
{ 0x14, 0x6420 },
{ 0x1f, 0x0000 },
-
- /* Update PFM & 10M TX idle timer */
- { 0x1f, 0x0007 },
- { 0x1e, 0x002f },
- { 0x15, 0x1919 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0007 },
- { 0x1e, 0x00ac },
- { 0x18, 0x0006 },
- { 0x1f, 0x0000 }
};
+ struct phy_device *phydev = tp->phydev;
rtl_apply_firmware(tp);
+ /* Enable Delay cap */
+ r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896);
+
rtl_writephy_batch(tp, phy_reg_init);
+ /* Update PFM & 10M TX idle timer */
+ r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919);
+
+ r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
+
/* DCO enable for 10M IDLE Power */
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x0023);
- rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006);
/* For impedance matching */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_w0w1_phy(tp, 0x08, 0x8000, 0x7f00);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000);
/* PHY auto speed down */
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x002d);
- rtl_w0w1_phy(tp, 0x18, 0x0050, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050);
+ phy_set_bits(phydev, 0x14, BIT(15));
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b86);
- rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
+ r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x0020);
- rtl_w0w1_phy(tp, 0x15, 0x0000, 0x1100);
- rtl_writephy(tp, 0x1f, 0x0006);
- rtl_writephy(tp, 0x00, 0x5a00);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x0d, 0x0007);
- rtl_writephy(tp, 0x0e, 0x003c);
- rtl_writephy(tp, 0x0d, 0x4007);
- rtl_writephy(tp, 0x0e, 0x0000);
- rtl_writephy(tp, 0x0d, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000);
+ phy_write_paged(phydev, 0x0006, 0x00, 0x5a00);
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000);
}
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
@@ -2971,36 +2918,20 @@ static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- /* Enable Delay cap */
- { 0x1f, 0x0004 },
- { 0x1f, 0x0007 },
- { 0x1e, 0x00ac },
- { 0x18, 0x0006 },
- { 0x1f, 0x0002 },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0000 },
+ struct phy_device *phydev = tp->phydev;
- /* Channel estimation fine tune */
- { 0x1f, 0x0003 },
- { 0x09, 0xa20f },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0000 },
+ rtl_apply_firmware(tp);
- /* Green Setting */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b5b },
- { 0x06, 0x9222 },
- { 0x05, 0x8b6d },
- { 0x06, 0x8000 },
- { 0x05, 0x8b76 },
- { 0x06, 0x8000 },
- { 0x1f, 0x0000 }
- };
+ /* Enable Delay cap */
+ r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
- rtl_apply_firmware(tp);
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
- rtl_writephy_batch(tp, phy_reg_init);
+ /* Green Setting */
+ r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222);
+ r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000);
+ r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000);
/* For 4-corner performance improve */
rtl_writephy(tp, 0x1f, 0x0005);
@@ -3009,25 +2940,14 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* PHY auto speed down */
- rtl_writephy(tp, 0x1f, 0x0004);
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x002d);
- rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
+ phy_set_bits(phydev, 0x14, BIT(15));
/* improve 10M EEE waveform */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b86);
- rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
/* Improve 2-pair detection performance */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
rtl8168f_config_eee_phy(tp);
rtl_enable_eee(tp);
@@ -3047,24 +2967,17 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
+ struct phy_device *phydev = tp->phydev;
+
/* For 4-corner performance improve */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b80);
- rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006);
/* PHY auto speed down */
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x002d);
- rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
+ phy_set_bits(phydev, 0x14, BIT(15));
/* Improve 10M EEE waveform */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b86);
- rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
rtl8168f_config_eee_phy(tp);
rtl_enable_eee(tp);
@@ -3072,52 +2985,31 @@ static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- /* Channel estimation fine tune */
- { 0x1f, 0x0003 },
- { 0x09, 0xa20f },
- { 0x1f, 0x0000 },
+ struct phy_device *phydev = tp->phydev;
- /* Modify green table for giga & fnet */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b55 },
- { 0x06, 0x0000 },
- { 0x05, 0x8b5e },
- { 0x06, 0x0000 },
- { 0x05, 0x8b67 },
- { 0x06, 0x0000 },
- { 0x05, 0x8b70 },
- { 0x06, 0x0000 },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0007 },
- { 0x1e, 0x0078 },
- { 0x17, 0x0000 },
- { 0x19, 0x00fb },
- { 0x1f, 0x0000 },
+ rtl_apply_firmware(tp);
- /* Modify green table for 10M */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b79 },
- { 0x06, 0xaa00 },
- { 0x1f, 0x0000 },
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
- /* Disable hiimpedance detection (RTCT) */
- { 0x1f, 0x0003 },
- { 0x01, 0x328a },
- { 0x1f, 0x0000 }
- };
+ /* Modify green table for giga & fnet */
+ r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb);
- rtl_apply_firmware(tp);
+ /* Modify green table for 10M */
+ r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
- rtl_writephy_batch(tp, phy_reg_init);
+ /* Disable high-impedance detection (RTCT) */
+ phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
rtl8168f_hw_phy_config(tp);
/* Improve 2-pair detection performance */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
}
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3129,77 +3021,43 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- /* Channel estimation fine tune */
- { 0x1f, 0x0003 },
- { 0x09, 0xa20f },
- { 0x1f, 0x0000 },
-
- /* Modify green table for giga & fnet */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b55 },
- { 0x06, 0x0000 },
- { 0x05, 0x8b5e },
- { 0x06, 0x0000 },
- { 0x05, 0x8b67 },
- { 0x06, 0x0000 },
- { 0x05, 0x8b70 },
- { 0x06, 0x0000 },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0007 },
- { 0x1e, 0x0078 },
- { 0x17, 0x0000 },
- { 0x19, 0x00aa },
- { 0x1f, 0x0000 },
-
- /* Modify green table for 10M */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b79 },
- { 0x06, 0xaa00 },
- { 0x1f, 0x0000 },
-
- /* Disable hiimpedance detection (RTCT) */
- { 0x1f, 0x0003 },
- { 0x01, 0x328a },
- { 0x1f, 0x0000 }
- };
-
+ struct phy_device *phydev = tp->phydev;
rtl_apply_firmware(tp);
rtl8168f_hw_phy_config(tp);
/* Improve 2-pair detection performance */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
- rtl_writephy_batch(tp, phy_reg_init);
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
+
+ /* Modify green table for giga & fnet */
+ r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa);
+
+ /* Modify green table for 10M */
+ r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
+
+ /* Disable high-impedance detection (RTCT) */
+ phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
/* Modify green table for giga */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b54);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
- rtl_writephy(tp, 0x05, 0x8b5d);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
- rtl_writephy(tp, 0x05, 0x8a7c);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
- rtl_writephy(tp, 0x05, 0x8a7f);
- rtl_w0w1_phy(tp, 0x06, 0x0100, 0x0000);
- rtl_writephy(tp, 0x05, 0x8a82);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
- rtl_writephy(tp, 0x05, 0x8a85);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
- rtl_writephy(tp, 0x05, 0x8a88);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000);
+ r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100);
+ r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000);
/* uc same-seed solution */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000);
/* Green feature */
rtl_writephy(tp, 0x1f, 0x0003);
@@ -3219,12 +3077,8 @@ static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp)
phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6));
- phy_write(phydev, 0x1f, 0x0a43);
- phy_write(phydev, 0x13, 0x8084);
- phy_clear_bits(phydev, 0x14, BIT(14) | BIT(13));
- phy_set_bits(phydev, 0x10, BIT(12) | BIT(1) | BIT(0));
-
- phy_write(phydev, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000);
+ phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003);
}
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3254,9 +3108,7 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
/* Enable UC LPF tune function */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8012);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
+ r8168g_phy_param(tp->phydev, 0x8012, 0x0000, 0x8000);
phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
@@ -3286,73 +3138,48 @@ static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
{
+ struct phy_device *phydev = tp->phydev;
u16 dout_tapbin;
u32 data;
rtl_apply_firmware(tp);
/* CHN EST parameters adjust - giga master */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x809b);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0xf800);
- rtl_writephy(tp, 0x13, 0x80a2);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0xff00);
- rtl_writephy(tp, 0x13, 0x80a4);
- rtl_w0w1_phy(tp, 0x14, 0x8500, 0xff00);
- rtl_writephy(tp, 0x13, 0x809c);
- rtl_w0w1_phy(tp, 0x14, 0xbd00, 0xff00);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000);
+ r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000);
+ r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500);
+ r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00);
/* CHN EST parameters adjust - giga slave */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x80ad);
- rtl_w0w1_phy(tp, 0x14, 0x7000, 0xf800);
- rtl_writephy(tp, 0x13, 0x80b4);
- rtl_w0w1_phy(tp, 0x14, 0x5000, 0xff00);
- rtl_writephy(tp, 0x13, 0x80ac);
- rtl_w0w1_phy(tp, 0x14, 0x4000, 0xff00);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000);
+ r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000);
/* CHN EST parameters adjust - fnet */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x808e);
- rtl_w0w1_phy(tp, 0x14, 0x1200, 0xff00);
- rtl_writephy(tp, 0x13, 0x8090);
- rtl_w0w1_phy(tp, 0x14, 0xe500, 0xff00);
- rtl_writephy(tp, 0x13, 0x8092);
- rtl_w0w1_phy(tp, 0x14, 0x9f00, 0xff00);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200);
+ r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500);
+ r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00);
/* enable R-tune & PGA-retune function */
dout_tapbin = 0;
- rtl_writephy(tp, 0x1f, 0x0a46);
- data = rtl_readphy(tp, 0x13);
+ data = phy_read_paged(phydev, 0x0a46, 0x13);
data &= 3;
data <<= 2;
dout_tapbin |= data;
- data = rtl_readphy(tp, 0x12);
+ data = phy_read_paged(phydev, 0x0a46, 0x12);
data &= 0xc000;
data >>= 14;
dout_tapbin |= data;
dout_tapbin = ~(dout_tapbin^0x08);
dout_tapbin <<= 12;
dout_tapbin &= 0xf000;
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x827a);
- rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
- rtl_writephy(tp, 0x13, 0x827b);
- rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
- rtl_writephy(tp, 0x13, 0x827c);
- rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
- rtl_writephy(tp, 0x13, 0x827d);
- rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
-
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x0811);
- rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0a42);
- rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+
+ r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
+ phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
/* enable GPHY 10M */
phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
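A worked pass through the dout_tapbin arithmetic above, with made-up readings: two bits from page 0x0a46 reg 0x13 land in bits 3:2 and reg 0x12[15:14] lands in bits 1:0, say 0b0110 combined. Then 0b0110 ^ 0x08 = 0b1110, ~0b1110 has low nibble 0b0001, and 0b0001 << 12 masked with 0xf000 gives 0x1000 -- the 4-bit field written into bits 15:12 of parameters 0x827a..0x827d via the 0xf000 mask.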
@@ -3360,22 +3187,13 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
/* SAR ADC performance */
phy_modify_paged(tp->phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x803f);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x8047);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x804f);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x8057);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x805f);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x8067);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x806f);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000);
/* disable phy pfm mode */
phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
@@ -3388,24 +3206,18 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
{
u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0;
+ struct phy_device *phydev = tp->phydev;
u16 rlen;
u32 data;
rtl_apply_firmware(tp);
/* CHIN EST parameter update */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x808a);
- rtl_w0w1_phy(tp, 0x14, 0x000a, 0x003f);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a);
/* enable R-tune & PGA-retune function */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x0811);
- rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0a42);
- rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
+ phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
/* enable GPHY 10M */
phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
@@ -3425,26 +3237,20 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);
if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) ||
- (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f)) {
- rtl_writephy(tp, 0x1f, 0x0bcf);
- rtl_writephy(tp, 0x16, data);
- rtl_writephy(tp, 0x1f, 0x0000);
- }
+ (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f))
+ phy_write_paged(phydev, 0x0bcf, 0x16, data);
/* Modify rlen (TX LPF corner frequency) level */
- rtl_writephy(tp, 0x1f, 0x0bcd);
- data = rtl_readphy(tp, 0x16);
+ data = phy_read_paged(phydev, 0x0bcd, 0x16);
data &= 0x000f;
rlen = 0;
if (data > 3)
rlen = data - 3;
data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12);
- rtl_writephy(tp, 0x17, data);
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_write_paged(phydev, 0x0bcd, 0x17, data);
/* disable phy pfm mode */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
+ phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
rtl8168g_disable_aldps(tp);
rtl8168g_config_eee_phy(tp);
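Two notes on the hunk above. The all-0x0f check reads as "no per-pair offset calibration programmed" (presumably unfused parts), in which case the write to page 0x0bcf reg 0x16 is skipped entirely. The rlen computation takes the low nibble of page 0x0bcd reg 0x16, subtracts 3 with a floor of 0, and replicates the result into all four nibbles of reg 0x17: a reading of 0x9 becomes rlen = 6 and a register value of 0x6666.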
@@ -3453,22 +3259,21 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
{
+ struct phy_device *phydev = tp->phydev;
+
/* Enable PHY auto speed down */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
rtl8168g_phy_adjust_10m_aldps(tp);
/* Enable EEE auto-fallback function */
- phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
+ phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
/* Enable UC LPF tune function */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8012);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
/* set rg_sel_sdm_rate */
- phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
+ phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
rtl8168g_disable_aldps(tp);
rtl8168g_config_eee_phy(tp);
@@ -3477,63 +3282,38 @@ static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
{
+ struct phy_device *phydev = tp->phydev;
+
rtl8168g_phy_adjust_10m_aldps(tp);
/* Enable UC LPF tune function */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8012);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
/* Set rg_sel_sdm_rate */
phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
/* Channel estimation parameters */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x80f3);
- rtl_w0w1_phy(tp, 0x14, 0x8b00, ~0x8bff);
- rtl_writephy(tp, 0x13, 0x80f0);
- rtl_w0w1_phy(tp, 0x14, 0x3a00, ~0x3aff);
- rtl_writephy(tp, 0x13, 0x80ef);
- rtl_w0w1_phy(tp, 0x14, 0x0500, ~0x05ff);
- rtl_writephy(tp, 0x13, 0x80f6);
- rtl_w0w1_phy(tp, 0x14, 0x6e00, ~0x6eff);
- rtl_writephy(tp, 0x13, 0x80ec);
- rtl_w0w1_phy(tp, 0x14, 0x6800, ~0x68ff);
- rtl_writephy(tp, 0x13, 0x80ed);
- rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
- rtl_writephy(tp, 0x13, 0x80f2);
- rtl_w0w1_phy(tp, 0x14, 0xf400, ~0xf4ff);
- rtl_writephy(tp, 0x13, 0x80f4);
- rtl_w0w1_phy(tp, 0x14, 0x8500, ~0x85ff);
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8110);
- rtl_w0w1_phy(tp, 0x14, 0xa800, ~0xa8ff);
- rtl_writephy(tp, 0x13, 0x810f);
- rtl_w0w1_phy(tp, 0x14, 0x1d00, ~0x1dff);
- rtl_writephy(tp, 0x13, 0x8111);
- rtl_w0w1_phy(tp, 0x14, 0xf500, ~0xf5ff);
- rtl_writephy(tp, 0x13, 0x8113);
- rtl_w0w1_phy(tp, 0x14, 0x6100, ~0x61ff);
- rtl_writephy(tp, 0x13, 0x8115);
- rtl_w0w1_phy(tp, 0x14, 0x9200, ~0x92ff);
- rtl_writephy(tp, 0x13, 0x810e);
- rtl_w0w1_phy(tp, 0x14, 0x0400, ~0x04ff);
- rtl_writephy(tp, 0x13, 0x810c);
- rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
- rtl_writephy(tp, 0x13, 0x810b);
- rtl_w0w1_phy(tp, 0x14, 0x5a00, ~0x5aff);
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x80d1);
- rtl_w0w1_phy(tp, 0x14, 0xff00, ~0xffff);
- rtl_writephy(tp, 0x13, 0x80cd);
- rtl_w0w1_phy(tp, 0x14, 0x9e00, ~0x9eff);
- rtl_writephy(tp, 0x13, 0x80d3);
- rtl_w0w1_phy(tp, 0x14, 0x0e00, ~0x0eff);
- rtl_writephy(tp, 0x13, 0x80d5);
- rtl_w0w1_phy(tp, 0x14, 0xca00, ~0xcaff);
- rtl_writephy(tp, 0x13, 0x80d7);
- rtl_w0w1_phy(tp, 0x14, 0x8400, ~0x84ff);
+ r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00);
+ r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00);
+ r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500);
+ r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00);
+ r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800);
+ r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00);
+ r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400);
+ r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500);
+ r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800);
+ r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00);
+ r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500);
+ r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100);
+ r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200);
+ r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400);
+ r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00);
+ r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00);
+ r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00);
+ r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00);
+ r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00);
+ r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00);
+ r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400);
/* Force PWM-mode */
rtl_writephy(tp, 0x1f, 0x0bcd);
@@ -3552,6 +3332,46 @@ static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
rtl_enable_eee(tp);
}
+static void rtl8117_hw_phy_config(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+
+ /* CHN EST parameters adjust - fnet */
+ r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800);
+ r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00);
+ r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000);
+
+ r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000);
+ r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00);
+ r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600);
+ r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000);
+ r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800);
+ r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000);
+ r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00);
+ r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800);
+ r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000);
+ r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300);
+ r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800);
+ r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200);
+ r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800);
+ r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800);
+ r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00);
+ r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300);
+ r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300);
+
+ r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800);
+
+ /* enable GPHY 10M */
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
+
+ r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400);
+
+ rtl8168g_disable_aldps(tp);
+ rtl8168h_config_eee_phy(tp);
+ rtl_enable_eee(tp);
+}
+
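Every r8168g_phy_param() call above follows the clear-mask-then-set-val convention, so each line rewrites exactly one field of the named parameter, e.g.:

    /* keep bits 10:0 of parameter 0x809b, set the field at 15:11 to 0b10110 */
    r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000);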
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3571,35 +3391,21 @@ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0005 },
- { 0x1a, 0x0000 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0004 },
- { 0x1c, 0x0000 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x15, 0x7701 },
- { 0x1f, 0x0000 }
- };
-
/* Disable ALDPS before ram code */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
+ phy_write(tp->phydev, 0x18, 0x0310);
msleep(100);
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0005, 0x1a, 0x0000);
+ phy_write_paged(tp->phydev, 0x0004, 0x1c, 0x0000);
+ phy_write_paged(tp->phydev, 0x0001, 0x15, 0x7701);
}
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
/* Disable ALDPS before setting firmware */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
+ phy_write(tp->phydev, 0x18, 0x0310);
msleep(20);
rtl_apply_firmware(tp);
@@ -3622,8 +3428,7 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
};
/* Disable ALDPS before ram code */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
+ phy_write(tp->phydev, 0x18, 0x0310);
msleep(100);
rtl_apply_firmware(tp);
@@ -3648,38 +3453,22 @@ static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp)
phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff);
phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff);
- phy_write(phydev, 0x1f, 0x0a43);
- phy_write(phydev, 0x13, 0x80ea);
- phy_modify(phydev, 0x14, 0xff00, 0xc400);
- phy_write(phydev, 0x13, 0x80eb);
- phy_modify(phydev, 0x14, 0x0700, 0x0300);
- phy_write(phydev, 0x13, 0x80f8);
- phy_modify(phydev, 0x14, 0xff00, 0x1c00);
- phy_write(phydev, 0x13, 0x80f1);
- phy_modify(phydev, 0x14, 0xff00, 0x3000);
- phy_write(phydev, 0x13, 0x80fe);
- phy_modify(phydev, 0x14, 0xff00, 0xa500);
- phy_write(phydev, 0x13, 0x8102);
- phy_modify(phydev, 0x14, 0xff00, 0x5000);
- phy_write(phydev, 0x13, 0x8105);
- phy_modify(phydev, 0x14, 0xff00, 0x3300);
- phy_write(phydev, 0x13, 0x8100);
- phy_modify(phydev, 0x14, 0xff00, 0x7000);
- phy_write(phydev, 0x13, 0x8104);
- phy_modify(phydev, 0x14, 0xff00, 0xf000);
- phy_write(phydev, 0x13, 0x8106);
- phy_modify(phydev, 0x14, 0xff00, 0x6500);
- phy_write(phydev, 0x13, 0x80dc);
- phy_modify(phydev, 0x14, 0xff00, 0xed00);
- phy_write(phydev, 0x13, 0x80df);
- phy_set_bits(phydev, 0x14, BIT(8));
- phy_write(phydev, 0x13, 0x80e1);
- phy_clear_bits(phydev, 0x14, BIT(8));
- phy_write(phydev, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400);
+ r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300);
+ r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00);
+ r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000);
+ r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500);
+ r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300);
+ r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000);
+ r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000);
+ r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500);
+ r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00);
+ r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100);
+ r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000);
phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038);
- phy_write_paged(phydev, 0xa43, 0x13, 0x819f);
- phy_write_paged(phydev, 0xa43, 0x14, 0xd0b6);
+ r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6);
phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
@@ -3734,22 +3523,16 @@ static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp)
phy_write(phydev, 0x14, 0x0002);
for (i = 0; i < 25; i++)
phy_write(phydev, 0x14, 0x0000);
-
- phy_write(phydev, 0x13, 0x8257);
- phy_write(phydev, 0x14, 0x020F);
-
- phy_write(phydev, 0x13, 0x80EA);
- phy_write(phydev, 0x14, 0x7843);
phy_write(phydev, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020f);
+ r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843);
+
rtl_apply_firmware(tp);
phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000);
- phy_write(phydev, 0x1f, 0x0a43);
- phy_write(phydev, 0x13, 0x81a2);
- phy_set_bits(phydev, 0x14, BIT(8));
- phy_write(phydev, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100);
phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00);
phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000);
@@ -3787,7 +3570,7 @@ static void rtl_hw_phy_config(struct net_device *dev)
[RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
[RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config,
[RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config,
- [RTL_GIGA_MAC_VER_22] = rtl8168c_4_hw_phy_config,
+ [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config,
[RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config,
[RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
[RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
@@ -3817,6 +3600,7 @@ static void rtl_hw_phy_config(struct net_device *dev)
[RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
[RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config,
};
@@ -3910,7 +3694,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_32:
case RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_52:
RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
break;
@@ -3946,6 +3730,7 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_52:
case RTL_GIGA_MAC_VER_60:
case RTL_GIGA_MAC_VER_61:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
@@ -3977,6 +3762,7 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_52:
case RTL_GIGA_MAC_VER_60:
case RTL_GIGA_MAC_VER_61:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
@@ -4008,7 +3794,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
@@ -4194,7 +3980,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
break;
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
@@ -5040,6 +4826,71 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, true);
}
+static void rtl_hw_start_8117(struct rtl8169_private *tp)
+{
+ static const struct ephy_info e_info_8117[] = {
+ { 0x19, 0x0040, 0x1100 },
+ { 0x59, 0x0040, 0x1100 },
+ };
+ int rg_saw_cnt;
+
+ rtl8168ep_stop_cmac(tp);
+
+ /* disable aspm and clock request before access ephy */
+ rtl_hw_aspm_clkreq_enable(tp, false);
+ rtl_ephy_init(tp, e_info_8117);
+
+ rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
+ rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
+
+ rtl_set_def_aspm_entry_latency(tp);
+
+ rtl_reset_packet_filter(tp);
+
+ rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f90);
+
+ rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
+
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+
+ rtl8168_config_eee_mac(tp);
+
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
+ RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
+
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
+
+ rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
+
+ rtl_pcie_state_l2l3_disable(tp);
+
+ rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff;
+ if (rg_saw_cnt > 0) {
+ u16 sw_cnt_1ms_ini;
+
+ sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff;
+ r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
+ }
+
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
+ r8168_mac_ocp_write(tp, 0xea80, 0x0003);
+ r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009);
+ r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
+
+ r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
+ r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
+ r8168_mac_ocp_write(tp, 0xc094, 0x0000);
+ r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
+
+ /* firmware is for MAC only */
+ rtl_apply_firmware(tp);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
+}
+
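A back-of-the-envelope for the rg_saw_cnt block above: if the 14-bit counter in page 0x0c42 reg 0x13 counts ticks of a nominal 16 MHz reference, 16000000 / rg_saw_cnt rescales it into the unit expected in the low 12 bits of MAC OCP reg 0xd412 -- e.g. rg_saw_cnt = 8000 yields sw_cnt_1ms_ini = 2000 (0x7d0). That reading is a guess; the constants are undocumented vendor magic, and the same calibration block appears in the 8168h/8168ep start paths.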
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8102e_1[] = {
@@ -5327,6 +5178,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
[RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
+ [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125_1,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125_2,
};
@@ -6873,7 +6725,7 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_stop_cmac(tp);
/* fall through */
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
@@ -6983,6 +6835,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->pci_dev = pdev;
tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
+ tp->eee_adv = -1;
/* Get the *optional* external "ether_clk" used on some boards */
rc = rtl_get_ether_clk(tp);
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index a9c89d5d8898..9f88b5db4f89 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -955,6 +955,8 @@ enum RAVB_QUEUE {
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
+#define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16))
+
/* TX descriptors per packet */
#define NUM_TX_DESC_GEN2 2
#define NUM_TX_DESC_GEN3 1
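The arithmetic: RX_BUF_SZ = 2048 - ETH_FCS_LEN + sizeof(__sum16) = 2048 - 4 + 2 = 2046 bytes per descriptor. Making this a compile-time constant (instead of the removed rx_buf_sz field, derived from MTU plus slack) makes every RX descriptor MTU-independent, which is what allows the ravb_change_mtu() rework below to skip tearing down and rebuilding the rings.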
@@ -1018,7 +1020,6 @@ struct ravb_private {
u32 dirty_rx[NUM_RX_QUEUE]; /* Producer ring indices */
u32 cur_tx[NUM_TX_QUEUE];
u32 dirty_tx[NUM_TX_QUEUE];
- u32 rx_buf_sz; /* Based on MTU+slack. */
struct napi_struct napi[NUM_RX_QUEUE];
struct work_struct work;
/* MII transceiver section. */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index de9aa8c47f1c..4b13a184bfc7 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -230,7 +230,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
le32_to_cpu(desc->dptr)))
dma_unmap_single(ndev->dev.parent,
le32_to_cpu(desc->dptr),
- priv->rx_buf_sz,
+ RX_BUF_SZ,
DMA_FROM_DEVICE);
}
ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -293,9 +293,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
rx_desc = &priv->rx_ring[q][i];
- rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+ rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- priv->rx_buf_sz,
+ RX_BUF_SZ,
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
@@ -342,9 +342,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
int ring_size;
int i;
- priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
- ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
-
/* Allocate RX and TX skb rings */
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
sizeof(*priv->rx_skb[q]), GFP_KERNEL);
@@ -354,7 +351,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
+ skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
if (!skb)
goto error;
ravb_set_buffer_align(skb);
@@ -584,7 +581,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- priv->rx_buf_sz,
+ RX_BUF_SZ,
DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -617,11 +614,11 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q][entry];
- desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+ desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
if (!priv->rx_skb[q][entry]) {
skb = netdev_alloc_skb(ndev,
- priv->rx_buf_sz +
+ RX_BUF_SZ +
RAVB_ALIGN - 1);
if (!skb)
break; /* Better luck next round. */
@@ -1801,10 +1798,15 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
{
- if (netif_running(ndev))
- return -EBUSY;
+ struct ravb_private *priv = netdev_priv(ndev);
ndev->mtu = new_mtu;
+
+ if (netif_running(ndev)) {
+ synchronize_irq(priv->emac_irq);
+ ravb_emac_init(ndev);
+ }
+
netdev_update_features(ndev);
return 0;
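With RX buffers fixed at RX_BUF_SZ, the only thing the MTU still influences at runtime is the EMAC's maximum frame length, so re-running ravb_emac_init() (which reprograms the receive frame length register from ndev->mtu) after quiescing the EMAC interrupt is sufficient, and the old -EBUSY refusal for a running interface can go.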
@@ -2046,7 +2048,9 @@ static int ravb_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
INIT_WORK(&priv->work, ravb_tx_timeout_work);
- priv->phy_interface = of_get_phy_mode(np);
+ error = of_get_phy_mode(np, &priv->phy_interface);
+ if (error && error != -ENODEV)
+ goto out_release;
priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
priv->avb_link_active_low =
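This and the later hunks in this patch adapt to the new of_get_phy_mode() prototype, which returns 0 or a negative errno and stores the parsed mode through a phy_interface_t pointer (leaving PHY_INTERFACE_MODE_NA there on failure), instead of overloading one int as both mode and error. The conversion pattern, sketched for a driver where the property is optional:

    phy_interface_t iface;
    int err;

    err = of_get_phy_mode(np, &iface);
    if (err && err != -ENODEV)    /* tolerate a missing property... */
        return err;               /* ...but not a malformed one */

Drivers for which the property is mandatory (sni_ave, meson8b, ipq806x, mediatek below) instead fail on any non-zero return.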
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 9a42580693cb..6984bd5b7da9 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -182,6 +182,13 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
struct net_device *ndev = priv->ndev;
unsigned long flags;
+ /* Reject requests with unsupported flags */
+ if (req->flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
if (req->index)
return -EINVAL;
@@ -211,6 +218,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
unsigned long flags;
int error = 0;
+ /* Reject requests with unsupported flags */
+ if (req->flags)
+ return -EOPNOTSUPP;
+
if (req->index)
return -EINVAL;
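Context for both checks: the flags arrive unfiltered from the PTP_EXTTS_REQUEST/PTP_PEROUT_REQUEST ioctls, and a driver that ignores unknown flags silently does the wrong thing when userspace asks for, say, a specific edge with PTP_STRICT_FLAGS. The idiom is to whitelist exactly what the hardware honours and return -EOPNOTSUPP for the rest; perout here supports no flags at all, hence the bare if (req->flags) test.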
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7ba35a0bdb29..e19b49c4013e 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3183,6 +3183,7 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
struct device_node *np = dev->of_node;
struct sh_eth_plat_data *pdata;
+ phy_interface_t interface;
const char *mac_addr;
int ret;
@@ -3190,10 +3191,10 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
if (!pdata)
return NULL;
- ret = of_get_phy_mode(np);
- if (ret < 0)
+ ret = of_get_phy_mode(np, &interface);
+ if (ret)
return NULL;
- pdata->phy_interface = ret;
+ pdata->phy_interface = interface;
mac_addr = of_get_mac_address(np);
if (!IS_ERR(mac_addr))
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 2412c87561e0..33f79402850d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -30,12 +30,15 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev,
{
struct device_node *np = pdev->dev.of_node;
struct sxgbe_dma_cfg *dma_cfg;
+ int err;
if (!np)
return -ENODEV;
*mac = of_get_mac_address(np);
- plat->interface = of_get_phy_mode(np);
+ err = of_get_phy_mode(np, &plat->interface);
+ if (err && err != -ENODEV)
+ return err;
plat->bus_id = of_alias_get_id(np, "ethernet");
if (plat->bus_id < 0)
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index a7d9841105d8..bec261905530 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -724,6 +724,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
netif_err(efx, rx_err, efx->net_dev,
"XDP TX failed (%d)\n", err);
channel->n_rx_xdp_bad_drops++;
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
} else {
channel->n_rx_xdp_tx++;
}
@@ -737,6 +738,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
netif_err(efx, rx_err, efx->net_dev,
"XDP redirect failed (%d)\n", err);
channel->n_rx_xdp_bad_drops++;
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
} else {
channel->n_rx_xdp_redirect++;
}
@@ -746,6 +748,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
bpf_warn_invalid_xdp_action(xdp_act);
efx_free_rx_buffers(rx_queue, rx_buf, 1);
channel->n_rx_xdp_bad_drops++;
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
break;
case XDP_ABORTED:
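trace_xdp_exception(dev, prog, act) is the stock XDP tracepoint for error paths; adding it to the three sfc failure branches makes these drops observable via the xdp:xdp_exception tracepoint (perf, xdp-monitor and similar tooling) rather than only through the driver's n_rx_xdp_bad_drops counter.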
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index deb636d653f3..d242906ae233 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -48,7 +48,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
-#include <linux/dma-direct.h>
+#include <linux/dma-mapping.h>
#include <net/ip.h>
@@ -89,6 +89,7 @@ struct ioc3_private {
struct device *dma_dev;
u32 *ssram;
unsigned long *rxr; /* pointer to receiver ring */
+ void *tx_ring;
struct ioc3_etxd *txr;
dma_addr_t rxr_dma;
dma_addr_t txr_dma;
@@ -1173,26 +1174,14 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ioc3 *ioc3;
unsigned long ioc3_base, ioc3_size;
u32 vendor, model, rev;
- int err, pci_using_dac;
+ int err;
/* Configure DMA attributes. */
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err < 0) {
- pr_err("%s: Unable to obtain 64 bit DMA for consistent allocations\n",
- pci_name(pdev));
- goto out;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- pr_err("%s: No usable DMA configuration, aborting.\n",
- pci_name(pdev));
- goto out;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ pr_err("%s: No usable DMA configuration, aborting.\n",
+ pci_name(pdev));
+ goto out;
}
if (pci_enable_device(pdev))
@@ -1204,9 +1193,6 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable;
}
- if (pci_using_dac)
- dev->features |= NETIF_F_HIGHDMA;
-
err = pci_request_regions(pdev, "ioc3");
if (err)
goto out_free;
@@ -1242,8 +1228,8 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ioc3_stop(ip);
/* Allocate rx ring. 4kb = 512 entries, must be 4kb aligned */
- ip->rxr = dma_direct_alloc_pages(ip->dma_dev, RX_RING_SIZE,
- &ip->rxr_dma, GFP_ATOMIC, 0);
+ ip->rxr = dma_alloc_coherent(ip->dma_dev, RX_RING_SIZE, &ip->rxr_dma,
+ GFP_KERNEL);
if (!ip->rxr) {
pr_err("ioc3-eth: rx ring allocation failed\n");
err = -ENOMEM;
@@ -1251,14 +1237,16 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Allocate tx rings. 16kb = 128 bufs, must be 16kb aligned */
- ip->txr = dma_direct_alloc_pages(ip->dma_dev, TX_RING_SIZE,
- &ip->txr_dma,
- GFP_KERNEL | __GFP_ZERO, 0);
- if (!ip->txr) {
+ ip->tx_ring = dma_alloc_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1,
+ &ip->txr_dma, GFP_KERNEL);
+ if (!ip->tx_ring) {
pr_err("ioc3-eth: tx ring allocation failed\n");
err = -ENOMEM;
goto out_stop;
}
+ /* Align TX ring */
+ ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K);
+ ip->txr_dma = ALIGN(ip->txr_dma, SZ_16K);
ioc3_init(dev);
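The TX-ring change is the usual over-allocate-then-align idiom: dma_alloc_coherent() takes no alignment argument, while the hardware wants the ring on a 16 KiB boundary, so the driver allocates SZ_16K - 1 extra bytes and rounds both addresses up. The generic shape (names hypothetical):

    buf = dma_alloc_coherent(dev, size + align - 1, &dma, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;
    ring = PTR_ALIGN(buf, align);      /* aligned CPU pointer */
    ring_dma = ALIGN(dma, align);      /* aligned device address */
    /* dma_free_coherent() must later be handed the original pointer,
     * which is why the new ip->tx_ring member keeps the unaligned
     * allocation */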
@@ -1288,7 +1276,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &ioc3_netdev_ops;
dev->ethtool_ops = &ioc3_ethtool_ops;
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
- dev->features = NETIF_F_IP_CSUM;
+ dev->features = NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;
sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);
@@ -1313,11 +1301,11 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
out_stop:
del_timer_sync(&ip->ioc3_timer);
if (ip->rxr)
- dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
- ip->rxr_dma, 0);
- if (ip->txr)
- dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
- ip->txr_dma, 0);
+ dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr,
+ ip->rxr_dma);
+ if (ip->tx_ring)
+ dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring,
+ ip->txr_dma);
out_res:
pci_release_regions(pdev);
out_free:
@@ -1335,10 +1323,8 @@ static void ioc3_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct ioc3_private *ip = netdev_priv(dev);
- dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
- ip->rxr_dma, 0);
- dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
- ip->txr_dma, 0);
+ dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma);
+ dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, ip->txr_dma);
unregister_netdev(dev);
del_timer_sync(&ip->ioc3_timer);
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 6e984d5a729f..f7e927ad67fa 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1565,10 +1565,10 @@ static int ave_probe(struct platform_device *pdev)
return -EINVAL;
np = dev->of_node;
- phy_mode = of_get_phy_mode(np);
- if ((int)phy_mode < 0) {
+ ret = of_get_phy_mode(np, &phy_mode);
+ if (ret) {
dev_err(dev, "phy-mode not found\n");
- return -EINVAL;
+ return ret;
}
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 912bbb6515b2..b210e987a1db 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -248,12 +248,13 @@ struct stmmac_safety_stats {
/* Max/Min RI Watchdog Timer count value */
#define MAX_DMA_RIWT 0xff
#define MIN_DMA_RIWT 0x10
+#define DEF_DMA_RIWT 0xa0
/* Tx coalesce parameters */
#define STMMAC_COAL_TX_TIMER 1000
#define STMMAC_MAX_COAL_TX_TICK 100000
#define STMMAC_TX_MAX_FRAMES 256
-#define STMMAC_TX_FRAMES 1
-#define STMMAC_RX_FRAMES 25
+#define STMMAC_TX_FRAMES 25
+#define STMMAC_RX_FRAMES 0
/* Packets types */
enum packets_types {
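Net effect of the revised defaults above: RX interrupt coalescing moves from frame-count based to purely timer based -- STMMAC_RX_FRAMES 0 disables the frame counter while the new DEF_DMA_RIWT (0xa0) seeds the RI watchdog timer -- and TX now batches up to 25 descriptors per interrupt instead of signalling every frame.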
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 527f93320a5a..d0d2d0fc5f0a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -61,9 +61,10 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
{
- int phy_mode;
- void __iomem *ctl_block;
struct anarion_gmac *gmac;
+ phy_interface_t phy_mode;
+ void __iomem *ctl_block;
+ int err;
ctl_block = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctl_block)) {
@@ -78,7 +79,10 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
gmac->ctl_block = (uintptr_t)ctl_block;
- phy_mode = of_get_phy_mode(pdev->dev.of_node);
+ err = of_get_phy_mode(pdev->dev.of_node, &phy_mode);
+ if (err)
+ return ERR_PTR(err);
+
switch (phy_mode) {
case PHY_INTERFACE_MODE_RGMII: /* Fall through */
 case PHY_INTERFACE_MODE_RGMII_ID: /* Fall through */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 0d21082ceb93..6ae13dc19510 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -189,9 +189,10 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
{
struct device *dev = &gmac->pdev->dev;
+ int ret;
- gmac->phy_mode = of_get_phy_mode(dev->of_node);
- if ((int)gmac->phy_mode < 0) {
+ ret = of_get_phy_mode(dev->of_node, &gmac->phy_mode);
+ if (ret) {
dev_err(dev, "missing phy mode property\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index cea7a0c7ce68..bdb80421acac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -54,7 +54,7 @@ struct mediatek_dwmac_plat_data {
struct device_node *np;
struct regmap *peri_regmap;
struct device *dev;
- int phy_mode;
+ phy_interface_t phy_mode;
bool rmii_rxc;
};
@@ -243,6 +243,7 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
u32 tx_delay_ps, rx_delay_ps;
+ int err;
plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg");
if (IS_ERR(plat->peri_regmap)) {
@@ -250,10 +251,10 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
return PTR_ERR(plat->peri_regmap);
}
- plat->phy_mode = of_get_phy_mode(plat->np);
- if (plat->phy_mode < 0) {
+ err = of_get_phy_mode(plat->np, &plat->phy_mode);
+ if (err) {
dev_err(plat->dev, "not find phy-mode\n");
- return -EINVAL;
+ return err;
}
if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 306da8f6b7d5..bd6c01004913 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -338,10 +338,9 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
}
dwmac->dev = &pdev->dev;
- dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if ((int)dwmac->phy_mode < 0) {
+ ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode);
+ if (ret) {
dev_err(&pdev->dev, "missing phy-mode property\n");
- ret = -EINVAL;
goto err_remove_config_dt;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index e2e469c37a4d..dc50ba13a746 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -37,7 +37,7 @@ struct rk_gmac_ops {
struct rk_priv_data {
struct platform_device *pdev;
- int phy_iface;
+ phy_interface_t phy_iface;
struct regulator *regulator;
bool suspended;
const struct rk_gmac_ops *ops;
@@ -1224,7 +1224,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
if (!bsp_priv)
return ERR_PTR(-ENOMEM);
- bsp_priv->phy_iface = of_get_phy_mode(dev->of_node);
+ of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface);
bsp_priv->ops = ops;
bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index e9fd661f7995..e1b63df6f96f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -116,7 +116,7 @@
#define ETH_PHY_SEL_MII 0x0
struct sti_dwmac {
- int interface; /* MII interface */
+ phy_interface_t interface; /* MII interface */
bool ext_phyclk; /* Clock from external PHY */
u32 tx_retime_src; /* TXCLK Retiming*/
struct clk *clk; /* PHY clock */
@@ -269,7 +269,12 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return err;
}
- dwmac->interface = of_get_phy_mode(np);
+ err = of_get_phy_mode(np, &dwmac->interface);
+ if (err && err != -ENODEV) {
+ dev_err(dev, "Can't get phy-mode\n");
+ return err;
+ }
+
dwmac->regmap = regmap;
dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 4ef041bdf6a1..9b7be996d07b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -155,18 +155,14 @@ static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare)
ret = clk_prepare_enable(dwmac->syscfg_clk);
if (ret)
return ret;
-
- if (dwmac->clk_eth_ck) {
- ret = clk_prepare_enable(dwmac->clk_eth_ck);
- if (ret) {
- clk_disable_unprepare(dwmac->syscfg_clk);
- return ret;
- }
+ ret = clk_prepare_enable(dwmac->clk_eth_ck);
+ if (ret) {
+ clk_disable_unprepare(dwmac->syscfg_clk);
+ return ret;
}
} else {
clk_disable_unprepare(dwmac->syscfg_clk);
- if (dwmac->clk_eth_ck)
- clk_disable_unprepare(dwmac->clk_eth_ck);
+ clk_disable_unprepare(dwmac->clk_eth_ck);
}
return ret;
}
@@ -175,7 +171,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
u32 reg = dwmac->mode_reg;
- int val, ret;
+ int val;
switch (plat_dat->interface) {
case PHY_INTERFACE_MODE_MII:
@@ -211,8 +207,8 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
}
/* Need to update PMCCLRR (clear register) */
- ret = regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET,
- dwmac->ops->syscfg_eth_mask);
+ regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET,
+ dwmac->ops->syscfg_eth_mask);
/* Update PMCSETR (set register) */
return regmap_update_bits(dwmac->regmap, reg,
@@ -320,12 +316,10 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
return PTR_ERR(dwmac->clk_ethstp);
}
- /* Clock for sysconfig */
+ /* Optional Clock for sysconfig */
dwmac->syscfg_clk = devm_clk_get(dev, "syscfg-clk");
- if (IS_ERR(dwmac->syscfg_clk)) {
- dev_err(dev, "No syscfg clock provided...\n");
- return PTR_ERR(dwmac->syscfg_clk);
- }
+ if (IS_ERR(dwmac->syscfg_clk))
+ dwmac->syscfg_clk = NULL;
/* Get IRQ information early to have an ability to ask for deferred
* probe if needed before we went too far with resource allocation.
@@ -437,8 +431,7 @@ static int stm32mp1_suspend(struct stm32_dwmac *dwmac)
clk_disable_unprepare(dwmac->clk_tx);
clk_disable_unprepare(dwmac->syscfg_clk);
- if (dwmac->clk_eth_ck)
- clk_disable_unprepare(dwmac->clk_eth_ck);
+ clk_disable_unprepare(dwmac->clk_eth_ck);
return ret;
}
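[ Illustration, not part of the patch: dropping the "if (dwmac->clk_eth_ck)"
  guards works because the clk API treats a NULL clock as a dummy —
  clk_prepare_enable(NULL) returns 0 and clk_disable_unprepare(NULL) is a
  no-op. The same idiom, sketched with devm_clk_get_optional(), the more
  common spelling for an optional clock (example_* name is made up):

	#include <linux/clk.h>
	#include <linux/device.h>

	static int example_enable_optional_clk(struct device *dev,
					       struct clk **clk)
	{
		/* NULL (not an error) when the clock is simply absent */
		*clk = devm_clk_get_optional(dev, "syscfg-clk");
		if (IS_ERR(*clk))
			return PTR_ERR(*clk);

		/* Safe even when *clk == NULL: the call is a no-op */
		return clk_prepare_enable(*clk);
	}
]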
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index ddcc191febdb..1c8d84ed8410 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1105,6 +1105,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct sunxi_priv_data *gmac;
struct device *dev = &pdev->dev;
+ phy_interface_t interface;
int ret;
struct stmmac_priv *priv;
struct net_device *ndev;
@@ -1178,10 +1179,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return ret;
}
- ret = of_get_phy_mode(dev->of_node);
- if (ret < 0)
+ ret = of_get_phy_mode(dev->of_node, &interface);
+ if (ret)
return -EINVAL;
- plat_dat->interface = ret;
+ plat_dat->interface = interface;
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
@@ -1226,7 +1227,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
dwmac_mux:
sun8i_dwmac_unset_syscon(gmac);
dwmac_exit:
- sun8i_dwmac_exit(pdev, plat_dat->bsp_priv);
+ stmmac_pltfr_remove(pdev);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index a299da3971b4..26353ef616b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -18,7 +18,7 @@
#include "stmmac_platform.h"
struct sunxi_priv_data {
- int interface;
+ phy_interface_t interface;
int clk_enabled;
struct clk *tx_clk;
struct regulator *regulator;
@@ -118,7 +118,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
- gmac->interface = of_get_phy_mode(dev->of_node);
+ ret = of_get_phy_mode(dev->of_node, &gmac->interface);
+ if (ret && ret != -ENODEV) {
+ dev_err(dev, "Can't get phy-mode\n");
+ goto err_remove_config_dt;
+ }
gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
if (IS_ERR(gmac->tx_clk)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 07e97f45755d..2dc70d104161 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -14,6 +14,7 @@
/* MAC registers */
#define GMAC_CONFIG 0x00000000
+#define GMAC_EXT_CONFIG 0x00000004
#define GMAC_PACKET_FILTER 0x00000008
#define GMAC_HASH_TAB(x) (0x10 + (x) * 4)
#define GMAC_VLAN_TAG 0x00000050
@@ -188,6 +189,11 @@ enum power_event {
#define GMAC_CONFIG_TE BIT(1)
#define GMAC_CONFIG_RE BIT(0)
+/* MAC extended config */
+#define GMAC_CONFIG_HDSMS GENMASK(22, 20)
+#define GMAC_CONFIG_HDSMS_SHIFT 20
+#define GMAC_CONFIG_HDSMS_256 (0x2 << GMAC_CONFIG_HDSMS_SHIFT)
+
/* MAC HW features0 bitmap */
#define GMAC_HW_FEAT_SAVLANINS BIT(27)
#define GMAC_HW_FEAT_ADDMAC BIT(18)
@@ -211,6 +217,7 @@ enum power_event {
#define GMAC_HW_HASH_TB_SZ GENMASK(25, 24)
#define GMAC_HW_FEAT_AVSEL BIT(20)
#define GMAC_HW_TSOEN BIT(18)
+#define GMAC_HW_FEAT_SPHEN BIT(17)
#define GMAC_HW_ADDR64 GENMASK(15, 14)
#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6)
#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
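[ Illustration, not part of the patch: GMAC_CONFIG_HDSMS is a three-bit
  field selecting the maximum header-segment size for Split Header mode,
  and 0x2 encodes 256 bytes (per the comment in dwmac4_enable_sph() later
  in this series). An equivalent spelling of that field update, using
  FIELD_PREP (example_* name is made up):

	#include <linux/bitfield.h>

	static u32 example_hdsms_256(u32 ext_cfg)
	{
		ext_cfg &= ~GMAC_CONFIG_HDSMS;
		/* 0x2 << 20 == GMAC_CONFIG_HDSMS_256 */
		return ext_cfg | FIELD_PREP(GMAC_CONFIG_HDSMS, 0x2);
	}
]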
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index bec929daf703..40ca00e596dd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -432,7 +432,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
* bits used depends on the hardware configuration
* selected at core configuration time.
*/
- int bit_nr = bitrev32(~crc32_le(~0, ha->addr,
+ u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
ETH_ALEN)) >> (32 - mcbitslog2);
/* The most significant bit determines the register to
* use (H/L) while the other 5 bits determine the bit
@@ -733,7 +733,7 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
}
static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double)
+ __le16 perfect_match, bool is_double)
{
void __iomem *ioaddr = hw->pcsr;
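[ Illustration, not part of the patch: the bit_nr change above (int -> u32)
  matters because the value is a bit-reversed CRC and must stay unsigned
  through the shifts. How the multicast hash bin is derived, as a
  self-contained sketch (example_* name is made up):

	#include <linux/bitrev.h>
	#include <linux/bits.h>
	#include <linux/crc32.h>
	#include <linux/if_ether.h>

	static void example_set_hash_bin(const u8 *addr, int mcbitslog2,
					 u32 *mc_filter)
	{
		/* Inverted, bit-reversed CRC-32/LE of the MAC address;
		 * keep only the top mcbitslog2 bits.
		 */
		u32 bit_nr = bitrev32(~crc32_le(~0, addr, ETH_ALEN)) >>
			     (32 - mcbitslog2);

		/* Bits 5+ select the 32-bit hash register, the low five
		 * bits the bit within it.
		 */
		mc_filter[bit_nr >> 5] |= BIT(bit_nr & 0x1f);
	}
]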
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 707ab5eba8da..3e14da69f378 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -83,9 +83,10 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(rdes3 & RDES3_OWN))
return dma_own;
- /* Verify rx error by looking at the last segment. */
- if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
+ if (unlikely(rdes3 & RDES3_CONTEXT_DESCRIPTOR))
return discard_frame;
+ if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
+ return rx_not_ls;
if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
if (unlikely(rdes3 & RDES3_GIANT_PACKET))
@@ -188,7 +189,7 @@ static void dwmac4_set_tx_owner(struct dma_desc *p)
static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
+ p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
if (!disable_rx_ic)
p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
@@ -492,6 +493,18 @@ static void dwmac4_set_vlan(struct dma_desc *p, u32 type)
p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK);
}
+static int dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+{
+ *len = le32_to_cpu(p->des2) & RDES2_HL;
+ return 0;
+}
+
+static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(lower_32_bits(addr));
+ p->des3 = cpu_to_le32(upper_32_bits(addr) | RDES3_BUFFER2_VALID_ADDR);
+}
+
const struct stmmac_desc_ops dwmac4_desc_ops = {
.tx_status = dwmac4_wrback_get_tx_status,
.rx_status = dwmac4_wrback_get_rx_status,
@@ -519,6 +532,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.set_sarc = dwmac4_set_sarc,
.set_vlan_tag = dwmac4_set_vlan_tag,
.set_vlan = dwmac4_set_vlan,
+ .get_rx_header_len = dwmac4_get_rx_header_len,
+ .set_sec_addr = dwmac4_set_sec_addr,
};
const struct stmmac_mode_ops dwmac4_ring_mode_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 0d7b3bbcd5a7..6d92109dc9aa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -109,6 +109,7 @@
#define RDES2_L4_FILTER_MATCH BIT(28)
#define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26)
#define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26
+#define RDES2_HL GENMASK(9, 0)
/* RDES3 (write back format) */
#define RDES3_PACKET_SIZE_MASK GENMASK(14, 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index b24c89572745..c15409030710 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -252,19 +252,9 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
rfa = 0x01; /* Full-1.5K */
break;
- case 8192:
- rfd = 0x06; /* Full-4K */
- rfa = 0x0a; /* Full-6K */
- break;
-
- case 16384:
- rfd = 0x06; /* Full-4K */
- rfa = 0x12; /* Full-10K */
- break;
-
default:
- rfd = 0x06; /* Full-4K */
- rfa = 0x1e; /* Full-16K */
+ rfd = 0x07; /* Full-4.5K */
+ rfa = 0x04; /* Full-3K */
break;
}
@@ -368,6 +358,7 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24;
dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+ dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17;
dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14;
switch (dma_cap->addr64) {
@@ -460,6 +451,22 @@ static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
}
+static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + GMAC_EXT_CONFIG);
+
+ value &= ~GMAC_CONFIG_HDSMS;
+ value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
+ writel(value, ioaddr + GMAC_EXT_CONFIG);
+
+ value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+ if (en)
+ value |= DMA_CONTROL_SPH;
+ else
+ value &= ~DMA_CONTROL_SPH;
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+}
+
const struct stmmac_dma_ops dwmac4_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
@@ -486,6 +493,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
.enable_tso = dwmac4_enable_tso,
.qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
+ .enable_sph = dwmac4_enable_sph,
};
const struct stmmac_dma_ops dwmac410_dma_ops = {
@@ -514,4 +522,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
.enable_tso = dwmac4_enable_tso,
.qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
+ .enable_sph = dwmac4_enable_sph,
};
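[ Illustration, not part of the patch: judging by the comments in these
  hunks, the RFD/RFA fields encode a threshold of "FIFO full minus
  (val + 2) half-KB" — 0x01 is Full-1.5K, 0x04 Full-3K, 0x07 Full-4.5K —
  so the new defaults put the flow-control deactivate (RFD) threshold
  4.5K below full and the activate (RFA) threshold 3K below full. A
  helper that would produce these encodings (inferred, so treat it as a
  sketch; example_* name is made up):

	static u32 example_encode_flow_thresh(unsigned int bytes_below_full)
	{
		/* Assumes bytes_below_full >= 1536, 512-byte aligned */
		return bytes_below_full / 512 - 2;
	}
]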
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 5299fa1001a3..589931795847 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -110,6 +110,7 @@
#define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60)
/* DMA Control X */
+#define DMA_CONTROL_SPH BIT(24)
#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
/* DMA Tx Channel X Control register defines */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 775db776b3cc..23fecf68f781 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates.
// stmmac Support for 5.xx Ethernet QoS cores
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 99037386080a..3b6e559aa0b9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
* stmmac XGMAC definitions.
@@ -360,7 +360,7 @@
#define XGMAC_TBUE BIT(2)
#define XGMAC_TIE BIT(0)
#define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \
- XGMAC_RIE | XGMAC_TBUE | XGMAC_TIE)
+ XGMAC_RIE | XGMAC_TIE)
#define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x)))
#define XGMAC_RWT GENMASK(7, 0)
#define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x)))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 5cda360d5d07..082f5ee9e525 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -224,6 +224,7 @@ static void dwxgmac2_config_cbs(struct mac_device_info *hw,
writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));
value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
+ value &= ~XGMAC_TSA;
value |= XGMAC_CC | XGMAC_CBS;
writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}
@@ -463,7 +464,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
value |= XGMAC_FILTER_HMC;
netdev_for_each_mc_addr(ha, dev) {
- int nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
+ u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
(32 - mcbitslog2));
mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
}
@@ -555,7 +556,7 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
}
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double)
+ __le16 perfect_match, bool is_double)
{
void __iomem *ioaddr = hw->pcsr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index ae48154f933c..bd5838ce1e8a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -288,7 +288,8 @@ static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
{
- *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
+ if (le32_to_cpu(p->des3) & XGMAC_RDES3_L34T)
+ *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 7cc331996cd8..22a7f0cc1b90 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -183,19 +183,9 @@ static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
rfa = 0x01; /* Full-1.5K */
break;
- case 8192:
- rfd = 0x06; /* Full-4K */
- rfa = 0x0a; /* Full-6K */
- break;
-
- case 16384:
- rfd = 0x06; /* Full-4K */
- rfa = 0x12; /* Full-10K */
- break;
-
default:
- rfd = 0x06; /* Full-4K */
- rfa = 0x1e; /* Full-16K */
+ rfd = 0x07; /* Full-4.5K */
+ rfa = 0x04; /* Full-3K */
break;
}
@@ -372,7 +362,7 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
- dma_cap->av &= !(hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
+ dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10);
dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9;
dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
@@ -473,6 +463,7 @@ static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
{
u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
+ u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL);
value &= ~XGMAC_TXQEN;
if (qmode != MTL_QUEUE_AVB) {
@@ -480,6 +471,7 @@ static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
} else {
value |= 0x1 << XGMAC_TXQEN_SHIFT;
+ writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL);
}
writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 1303d1e9a18f..aa5b917398fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
// stmmac HW Interface Callbacks
@@ -357,7 +357,7 @@ struct stmmac_ops {
struct stmmac_rss *cfg, u32 num_rxq);
/* VLAN */
void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double);
+ __le16 perfect_match, bool is_double);
void (*enable_vlan)(struct mac_device_info *hw, u32 type);
/* TX Timestamp */
int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
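[ Illustration, not part of the patch: typing perfect_match as __le16 lets
  sparse check endianness across the hwif boundary; the caller converts
  exactly once and everything below carries an annotated little-endian
  value. Sketch (example_* name is made up):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static __le16 example_vlan_pmatch(u16 vid)
	{
		/* Host-order VID in, sparse-checked LE value out */
		return cpu_to_le16(vid);
	}
]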
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index a223584f5f9a..252cf48c5816 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -176,6 +176,7 @@
#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
#define MMC_XGMAC_RX_FPE_FRAG 0x234
+#define MMC_XGMAC_RX_IPC_INTR_MASK 0x25c
static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
@@ -333,8 +334,9 @@ static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
- writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
- writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
+ writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
+ writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
}
static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 654a2b7595b8..8cc4cd0cc515 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -36,6 +36,7 @@
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
+#include <linux/udp.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
@@ -1502,10 +1503,8 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
rx_q->dma_erx, rx_q->dma_rx_phy);
kfree(rx_q->buf_pool);
- if (rx_q->page_pool) {
- page_pool_request_shutdown(rx_q->page_pool);
+ if (rx_q->page_pool)
page_pool_destroy(rx_q->page_pool);
- }
}
}
@@ -2604,9 +2603,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
if (priv->use_riwt) {
- ret = stmmac_rx_watchdog(priv, priv->ioaddr, MIN_DMA_RIWT, rx_cnt);
- if (!ret)
- priv->rx_riwt = MIN_DMA_RIWT;
+ if (!priv->rx_riwt)
+ priv->rx_riwt = DEF_DMA_RIWT;
+
+ ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
}
if (priv->hw->pcs)
@@ -2914,19 +2914,26 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
+ unsigned int first_entry, tx_packets;
+ int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q;
- unsigned int first_entry;
- int tmp_pay_len = 0;
+ u8 proto_hdr_len, hdr;
+ bool has_vlan, set_ic;
u32 pay_len, mss;
- u8 proto_hdr_len;
dma_addr_t des;
- bool has_vlan;
int i;
tx_q = &priv->tx_queue[queue];
+ first_tx = tx_q->cur_tx;
/* Compute header lengths */
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
+ hdr = sizeof(struct udphdr);
+ } else {
+ proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr = tcp_hdrlen(skb);
+ }
/* Desc availability based on threshold should be enough safe */
if (unlikely(stmmac_tx_avail(priv, queue) <
@@ -2956,8 +2963,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (netif_msg_tx_queued(priv)) {
- pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
- __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
+ pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
+ __func__, hdr, proto_hdr_len, pay_len, mss);
pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
skb->data_len);
}
@@ -2996,6 +3003,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_set_desc_addr(priv, first, des);
tmp_pay_len = pay_len;
des += proto_hdr_len;
+ pay_len = 0;
}
stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
@@ -3023,6 +3031,30 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* Only the last descriptor gets to point to the skb. */
tx_q->tx_skbuff[tx_q->cur_tx] = skb;
+ /* Manage tx mitigation */
+ tx_packets = (tx_q->cur_tx + 1) - first_tx;
+ tx_q->tx_count_frames += tx_packets;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
+ set_ic = true;
+ else if (!priv->tx_coal_frames)
+ set_ic = false;
+ else if (tx_packets > priv->tx_coal_frames)
+ set_ic = true;
+ else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, desc);
+ priv->xstats.tx_set_ic_bit++;
+ } else {
+ stmmac_tx_timer_arm(priv, queue);
+ }
+
/* We've used all descriptors we need for this skb, however,
* advance cur_tx so that it references a fresh descriptor.
* ndo_start_xmit will fill this descriptor the next time it's
@@ -3040,19 +3072,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_tso_frames++;
priv->xstats.tx_tso_nfrags += nfrags;
- /* Manage tx mitigation */
- tx_q->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
- !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
- (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- priv->hwts_tx_en)) {
- stmmac_tx_timer_arm(priv, queue);
- } else {
- tx_q->tx_count_frames = 0;
- stmmac_set_tx_ic(priv, desc);
- priv->xstats.tx_set_ic_bit++;
- }
-
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -3070,7 +3089,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
proto_hdr_len,
pay_len,
1, tx_q->tx_skbuff_dma[first_entry].last_segment,
- tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
+ hdr / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
if (mss_desc) {
@@ -3124,27 +3143,30 @@ dma_map_err:
*/
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ unsigned int first_entry, tx_packets, enh_desc;
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int nopaged_len = skb_headlen(skb);
int i, csum_insertion = 0, is_jumbo = 0;
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
+ int gso = skb_shinfo(skb)->gso_type;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
- unsigned int first_entry;
- unsigned int enh_desc;
+ bool has_vlan, set_ic;
+ int entry, first_tx;
dma_addr_t des;
- bool has_vlan;
- int entry;
tx_q = &priv->tx_queue[queue];
+ first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+ if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+ return stmmac_tso_xmit(skb, dev);
+ if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
return stmmac_tso_xmit(skb, dev);
}
@@ -3224,6 +3246,38 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Only the last descriptor gets to point to the skb. */
tx_q->tx_skbuff[entry] = skb;
+ /* According to the coalesce parameter the IC bit for the latest
+ * segment is reset and the timer re-started to clean the tx status.
+ * This approach takes care of the fragments: desc is the first
+ * element in case of no SG.
+ */
+ tx_packets = (entry + 1) - first_tx;
+ tx_q->tx_count_frames += tx_packets;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
+ set_ic = true;
+ else if (!priv->tx_coal_frames)
+ set_ic = false;
+ else if (tx_packets > priv->tx_coal_frames)
+ set_ic = true;
+ else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ if (likely(priv->extend_desc))
+ desc = &tx_q->dma_etx[entry].basic;
+ else
+ desc = &tx_q->dma_tx[entry];
+
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, desc);
+ priv->xstats.tx_set_ic_bit++;
+ } else {
+ stmmac_tx_timer_arm(priv, queue);
+ }
+
/* We've used all descriptors we need for this skb, however,
* advance cur_tx so that it references a fresh descriptor.
* ndo_start_xmit will fill this descriptor the next time it's
@@ -3259,23 +3313,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
- /* According to the coalesce parameter the IC bit for the latest
- * segment is reset and the timer re-started to clean the tx status.
- * This approach takes care about the fragments: desc is the first
- * element in case of no SG.
- */
- tx_q->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
- !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
- (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- priv->hwts_tx_en)) {
- stmmac_tx_timer_arm(priv, queue);
- } else {
- tx_q->tx_count_frames = 0;
- stmmac_set_tx_ic(priv, desc);
- priv->xstats.tx_set_ic_bit++;
- }
-
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -3425,7 +3462,11 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
rx_q->rx_count_frames += priv->rx_coal_frames;
if (rx_q->rx_count_frames > priv->rx_coal_frames)
rx_q->rx_count_frames = 0;
- use_rx_wd = priv->use_riwt && rx_q->rx_count_frames;
+
+ use_rx_wd = !priv->rx_coal_frames;
+ use_rx_wd |= rx_q->rx_count_frames > 0;
+ if (!priv->use_riwt)
+ use_rx_wd = false;
dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd);
@@ -3438,6 +3479,55 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
+static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
+ struct dma_desc *p,
+ int status, unsigned int len)
+{
+ int ret, coe = priv->hw->rx_csum;
+ unsigned int plen = 0, hlen = 0;
+
+ /* Not first descriptor, buffer is always zero */
+ if (priv->sph && len)
+ return 0;
+
+ /* First descriptor, get split header length */
+ ret = stmmac_get_rx_header_len(priv, p, &hlen);
+ if (priv->sph && hlen) {
+ priv->xstats.rx_split_hdr_pkt_n++;
+ return hlen;
+ }
+
+ /* First descriptor, not last descriptor and not split header */
+ if (status & rx_not_ls)
+ return priv->dma_buf_sz;
+
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+ /* First descriptor and last descriptor and not split header */
+ return min_t(unsigned int, priv->dma_buf_sz, plen);
+}
+
+static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
+ struct dma_desc *p,
+ int status, unsigned int len)
+{
+ int coe = priv->hw->rx_csum;
+ unsigned int plen = 0;
+
+ /* Not split header, buffer is not available */
+ if (!priv->sph)
+ return 0;
+
+ /* Not last descriptor */
+ if (status & rx_not_ls)
+ return priv->dma_buf_sz;
+
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+ /* Last descriptor */
+ return plen - len;
+}
+
/**
* stmmac_rx - manage the receive process
* @priv: driver private structure
@@ -3467,11 +3557,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
- unsigned int hlen = 0, prev_len = 0;
+ unsigned int buf1_len = 0, buf2_len = 0;
enum pkt_hash_types hash_type;
struct stmmac_rx_buffer *buf;
struct dma_desc *np, *p;
- unsigned int sec_len;
int entry;
u32 hash;
@@ -3490,7 +3579,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
break;
read_again:
- sec_len = 0;
+ buf1_len = 0;
+ buf2_len = 0;
entry = next_entry;
buf = &rx_q->buf_pool[entry];
@@ -3506,8 +3596,6 @@ read_again:
if (unlikely(status & dma_own))
break;
- count++;
-
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
next_entry = rx_q->cur_rx;
@@ -3517,7 +3605,6 @@ read_again:
np = rx_q->dma_rx + next_entry;
prefetch(np);
- prefetch(page_address(buf->page));
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->dev->stats,
@@ -3534,67 +3621,61 @@ read_again:
goto read_again;
if (unlikely(error)) {
dev_kfree_skb(skb);
+ skb = NULL;
+ count++;
continue;
}
/* Buffer is good. Go on. */
- if (likely(status & rx_not_ls)) {
- len += priv->dma_buf_sz;
- } else {
- prev_len = len;
- len = stmmac_get_rx_frame_len(priv, p, coe);
-
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
- * Type frames (LLC/LLC-SNAP)
- *
- * llc_snap is never checked in GMAC >= 4, so this ACS
- * feature is always disabled and packets need to be
- * stripped manually.
- */
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
- unlikely(status != llc_snap))
- len -= ETH_FCS_LEN;
+ prefetch(page_address(buf->page));
+ if (buf->sec_page)
+ prefetch(page_address(buf->sec_page));
+
+ buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
+ len += buf1_len;
+ buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
+ len += buf2_len;
+
+ /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+ * Type frames (LLC/LLC-SNAP)
+ *
+ * llc_snap is never checked in GMAC >= 4, so this ACS
+ * feature is always disabled and packets need to be
+ * stripped manually.
+ */
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+ unlikely(status != llc_snap)) {
+ if (buf2_len)
+ buf2_len -= ETH_FCS_LEN;
+ else
+ buf1_len -= ETH_FCS_LEN;
+
+ len -= ETH_FCS_LEN;
}
if (!skb) {
- int ret = stmmac_get_rx_header_len(priv, p, &hlen);
-
- if (priv->sph && !ret && (hlen > 0)) {
- sec_len = len;
- if (!(status & rx_not_ls))
- sec_len = sec_len - hlen;
- len = hlen;
-
- prefetch(page_address(buf->sec_page));
- priv->xstats.rx_split_hdr_pkt_n++;
- }
-
- skb = napi_alloc_skb(&ch->rx_napi, len);
+ skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
if (!skb) {
priv->dev->stats.rx_dropped++;
- continue;
+ count++;
+ goto drain_data;
}
- dma_sync_single_for_cpu(priv->device, buf->addr, len,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf1_len, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, page_address(buf->page),
- len);
- skb_put(skb, len);
+ buf1_len);
+ skb_put(skb, buf1_len);
/* Data payload copied into SKB, page ready for recycle */
page_pool_recycle_direct(rx_q->page_pool, buf->page);
buf->page = NULL;
- } else {
- unsigned int buf_len = len - prev_len;
-
- if (likely(status & rx_not_ls))
- buf_len = priv->dma_buf_sz;
-
+ } else if (buf1_len) {
dma_sync_single_for_cpu(priv->device, buf->addr,
- buf_len, DMA_FROM_DEVICE);
+ buf1_len, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf->page, 0, buf_len,
+ buf->page, 0, buf1_len,
priv->dma_buf_sz);
/* Data payload appended into SKB */
@@ -3602,22 +3683,23 @@ read_again:
buf->page = NULL;
}
- if (sec_len > 0) {
+ if (buf2_len) {
dma_sync_single_for_cpu(priv->device, buf->sec_addr,
- sec_len, DMA_FROM_DEVICE);
+ buf2_len, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf->sec_page, 0, sec_len,
+ buf->sec_page, 0, buf2_len,
priv->dma_buf_sz);
- len += sec_len;
-
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->sec_page);
buf->sec_page = NULL;
}
+drain_data:
if (likely(status & rx_not_ls))
goto read_again;
+ if (!skb)
+ continue;
/* Got entire packet into SKB. Finish it. */
@@ -3635,12 +3717,14 @@ read_again:
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rx_napi, skb);
+ skb = NULL;
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += len;
+ count++;
}
- if (status & rx_not_ls) {
+ if (status & rx_not_ls || skb) {
rx_q->state_saved = true;
rx_q->state.skb = skb;
rx_q->state.error = error;
@@ -3988,11 +4072,13 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ int gso = skb_shinfo(skb)->gso_type;
+
+ if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
/*
- * There is no way to determine the number of TSO
+ * There is no way to determine the number of TSO/USO
* capable Queues. Let's always use Queue 0
- * because if TSO is supported then at least this
+ * because if TSO/USO is supported then at least this
* one will be capable.
*/
return 0;
@@ -4208,6 +4294,7 @@ static u32 stmmac_vid_crc32_le(__le16 vid_le)
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
u32 crc, hash = 0;
+ __le16 pmatch = 0;
int count = 0;
u16 vid = 0;
@@ -4222,11 +4309,11 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
if (count > 2) /* VID = 0 always passes filter */
return -EOPNOTSUPP;
- vid = cpu_to_le16(vid);
+ pmatch = cpu_to_le16(vid);
hash = 0;
}
- return stmmac_update_vlan_hash(priv, priv->hw, hash, vid, is_double);
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
@@ -4506,6 +4593,8 @@ int stmmac_dvr_probe(struct device *device,
if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (priv->plat->has_gmac4)
+ ndev->hw_features |= NETIF_F_GSO_UDP_L4;
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
}
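[ Illustration, not part of the patch: the reworked TX mitigation sets the
  IC (interrupt-on-completion) bit whenever the running frame count
  crosses a multiple of tx_coal_frames, which the modulo test detects.
  E.g. with tx_coal_frames = 25, a count going 24 -> 27 via 3 new frames
  gives 27 % 25 = 2 < 3, so a boundary was crossed. The non-timestamping
  part of that decision, as a stand-alone sketch (example_* name is made
  up):

	#include <linux/types.h>

	/* @count already includes @new_frames */
	static bool example_coal_boundary_crossed(unsigned int count,
						  unsigned int new_frames,
						  unsigned int coal)
	{
		if (!coal)
			return false;	/* coalescing off: rely on timer */
		if (new_frames > coal)
			return true;
		return (count % coal) < new_frames;
	}
]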
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 40c42637ad75..cfe5d8b73142 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -41,20 +41,32 @@
#define MII_XGMAC_BUSY BIT(22)
#define MII_XGMAC_MAX_C22ADDR 3
#define MII_XGMAC_C22P_MASK GENMASK(MII_XGMAC_MAX_C22ADDR, 0)
+#define MII_XGMAC_PA_SHIFT 16
+#define MII_XGMAC_DA_SHIFT 21
+
+static int stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
+ int phyreg, u32 *hw_addr)
+{
+ u32 tmp;
+
+ /* Set port as Clause 45 */
+ tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
+ tmp &= ~BIT(phyaddr);
+ writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
+
+ *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0xffff);
+ *hw_addr |= (phyreg >> MII_DEVADDR_C45_SHIFT) << MII_XGMAC_DA_SHIFT;
+ return 0;
+}
static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
int phyreg, u32 *hw_addr)
{
- unsigned int mii_data = priv->hw->mii.data;
u32 tmp;
/* HW does not support C22 addr >= 4 */
if (phyaddr > MII_XGMAC_MAX_C22ADDR)
return -ENODEV;
- /* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000))
- return -EBUSY;
/* Set port as Clause 22 */
tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
@@ -62,7 +74,7 @@ static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
tmp |= BIT(phyaddr);
writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
- *hw_addr = (phyaddr << 16) | (phyreg & 0x1f);
+ *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0x1f);
return 0;
}
@@ -75,17 +87,28 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
u32 tmp, addr, value = MII_XGMAC_BUSY;
int ret;
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
if (phyreg & MII_ADDR_C45) {
- return -EOPNOTSUPP;
+ phyreg &= ~MII_ADDR_C45;
+
+ ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ return ret;
} else {
ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
if (ret)
return ret;
+
+ value |= MII_XGMAC_SADDR;
}
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
- value |= MII_XGMAC_SADDR | MII_XGMAC_READ;
+ value |= MII_XGMAC_READ;
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
@@ -115,17 +138,28 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
u32 addr, tmp, value = MII_XGMAC_BUSY;
int ret;
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
if (phyreg & MII_ADDR_C45) {
- return -EOPNOTSUPP;
+ phyreg &= ~MII_ADDR_C45;
+
+ ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ return ret;
} else {
ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
if (ret)
return ret;
+
+ value |= MII_XGMAC_SADDR;
}
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
- value |= phydata | MII_XGMAC_SADDR;
+ value |= phydata;
value |= MII_XGMAC_WRITE;
/* Wait until any existing MII operation is complete */
@@ -363,6 +397,10 @@ int stmmac_mdio_register(struct net_device *ndev)
goto bus_register_fail;
}
+ /* Looks like we need a dummy read for XGMAC only and C45 PHYs */
+ if (priv->plat->has_xgmac)
+ stmmac_xgmac2_mdio_read(new_bus, 0, MII_ADDR_C45);
+
if (priv->plat->phy_node || mdio_node)
goto bus_register_done;
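[ Illustration, not part of the patch: a Clause 45 access arrives packed
  into the single 'phyreg' int that stmmac_xgmac2_c45_format() above
  unpacks — the MII_ADDR_C45 flag, the device address in bits 20:16 and
  the 16-bit register number. How a caller would build one (example_*
  name is made up):

	#include <linux/mdio.h>
	#include <linux/phy.h>

	static int example_c45_phyreg(int devad, int regnum)
	{
		return MII_ADDR_C45 | (devad << MII_DEVADDR_C45_SHIFT) |
		       (regnum & 0xffff);
	}
]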
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 170c3a052b14..bedaff0c13bd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
*mac = NULL;
}
- plat->phy_interface = of_get_phy_mode(np);
- if (plat->phy_interface < 0)
- return ERR_PTR(plat->phy_interface);
+ rc = of_get_phy_mode(np, &plat->phy_interface);
+ if (rc)
+ return ERR_PTR(rc);
plat->interface = stmmac_of_get_mac_mode(np);
if (plat->interface < 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index df638b18b72c..0989e2bb6ee3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -140,6 +140,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
cfg = &priv->pps[rq->perout.index];
cfg->start.tv_sec = rq->perout.start.sec;
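[ Illustration, not part of the patch: the driver now rejects any
  rq->perout.flags it does not implement, so userspace must zero the
  request. A minimal sketch of a 1 Hz periodic-output request against a
  /dev/ptpN descriptor (example_* name is made up):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	static int example_request_perout(int ptp_fd)
	{
		struct ptp_perout_request req;

		memset(&req, 0, sizeof(req));	/* flags and rsv zeroed */
		req.index = 0;			/* first periodic output */
		req.period.sec = 1;		/* 1 Hz */

		return ioctl(ptp_fd, PTP_PEROUT_REQUEST, &req);
	}
]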
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 0b5db52149bc..f3d8b9336b8e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -6,7 +6,9 @@
* Author: Jose Abreu <joabreu@synopsys.com>
*/
+#include <linux/bitrev.h>
#include <linux/completion.h>
+#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/phy.h>
@@ -485,12 +487,48 @@ static int stmmac_filter_check(struct stmmac_priv *priv)
return -EOPNOTSUPP;
}
+static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr)
+{
+ int mc_offset = 32 - priv->hw->mcast_bits_log2;
+ struct netdev_hw_addr *ha;
+ u32 hash, hash_nr;
+
+ /* First compute the hash for desired addr */
+ hash = bitrev32(~crc32_le(~0, addr, 6)) >> mc_offset;
+ hash_nr = hash >> 5;
+ hash = 1 << (hash & 0x1f);
+
+ /* Now, check if it collides with any existing one */
+ netdev_for_each_mc_addr(ha, priv->dev) {
+ u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset;
+ if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash))
+ return false;
+ }
+
+ /* No collisions, address is good to go */
+ return true;
+}
+
+static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr)
+{
+ struct netdev_hw_addr *ha;
+
+ /* Check if it collides with any existing one */
+ netdev_for_each_uc_addr(ha, priv->dev) {
+ if (!memcmp(ha->addr, addr, ETH_ALEN))
+ return false;
+ }
+
+ /* No collisions, address is good to go */
+ return true;
+}
+
static int stmmac_test_hfilt(struct stmmac_priv *priv)
{
- unsigned char gd_addr[ETH_ALEN] = {0x01, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
- unsigned char bd_addr[ETH_ALEN] = {0x01, 0x01, 0x02, 0x03, 0x04, 0x05};
+ unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
+ unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
struct stmmac_packet_attrs attr = { };
- int ret;
+ int ret, tries = 256;
ret = stmmac_filter_check(priv);
if (ret)
@@ -499,6 +537,16 @@ static int stmmac_test_hfilt(struct stmmac_priv *priv)
if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
return -EOPNOTSUPP;
+ while (--tries) {
+ /* We only need to check the bd_addr for collisions */
+ bd_addr[ETH_ALEN - 1] = tries;
+ if (stmmac_hash_check(priv, bd_addr))
+ break;
+ }
+
+ if (!tries)
+ return -EOPNOTSUPP;
+
ret = dev_mc_add(priv->dev, gd_addr);
if (ret)
return ret;
@@ -523,13 +571,25 @@ cleanup:
static int stmmac_test_pfilt(struct stmmac_priv *priv)
{
- unsigned char gd_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
- unsigned char bd_addr[ETH_ALEN] = {0x08, 0x00, 0x22, 0x33, 0x44, 0x55};
+ unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77};
+ unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
struct stmmac_packet_attrs attr = { };
- int ret;
+ int ret, tries = 256;
if (stmmac_filter_check(priv))
return -EOPNOTSUPP;
+ if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
+ return -EOPNOTSUPP;
+
+ while (--tries) {
+ /* We only need to check the bd_addr for collisions */
+ bd_addr[ETH_ALEN - 1] = tries;
+ if (stmmac_perfect_check(priv, bd_addr))
+ break;
+ }
+
+ if (!tries)
+ return -EOPNOTSUPP;
ret = dev_uc_add(priv->dev, gd_addr);
if (ret)
@@ -553,39 +613,31 @@ cleanup:
return ret;
}
-static int stmmac_dummy_sync(struct net_device *netdev, const u8 *addr)
-{
- return 0;
-}
-
-static void stmmac_test_set_rx_mode(struct net_device *netdev)
-{
- /* As we are in test mode of ethtool we already own the rtnl lock
- * so no address will change from user. We can just call the
- * ndo_set_rx_mode() callback directly */
- if (netdev->netdev_ops->ndo_set_rx_mode)
- netdev->netdev_ops->ndo_set_rx_mode(netdev);
-}
-
static int stmmac_test_mcfilt(struct stmmac_priv *priv)
{
- unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
- unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
+ unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
+ unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
struct stmmac_packet_attrs attr = { };
- int ret;
+ int ret, tries = 256;
if (stmmac_filter_check(priv))
return -EOPNOTSUPP;
- if (!priv->hw->multicast_filter_bins)
+ if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
return -EOPNOTSUPP;
- /* Remove all MC addresses */
- __dev_mc_unsync(priv->dev, NULL);
- stmmac_test_set_rx_mode(priv->dev);
+ while (--tries) {
+ /* We only need to check the mc_addr for collisions */
+ mc_addr[ETH_ALEN - 1] = tries;
+ if (stmmac_hash_check(priv, mc_addr))
+ break;
+ }
+
+ if (!tries)
+ return -EOPNOTSUPP;
ret = dev_uc_add(priv->dev, uc_addr);
if (ret)
- goto cleanup;
+ return ret;
attr.dst = uc_addr;
@@ -602,30 +654,34 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv)
cleanup:
dev_uc_del(priv->dev, uc_addr);
- __dev_mc_sync(priv->dev, stmmac_dummy_sync, NULL);
- stmmac_test_set_rx_mode(priv->dev);
return ret;
}
static int stmmac_test_ucfilt(struct stmmac_priv *priv)
{
- unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
- unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
+ unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
+ unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
struct stmmac_packet_attrs attr = { };
- int ret;
+ int ret, tries = 256;
if (stmmac_filter_check(priv))
return -EOPNOTSUPP;
- if (!priv->hw->multicast_filter_bins)
+ if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
return -EOPNOTSUPP;
- /* Remove all UC addresses */
- __dev_uc_unsync(priv->dev, NULL);
- stmmac_test_set_rx_mode(priv->dev);
+ while (--tries) {
+ /* We only need to check the uc_addr for collisions */
+ uc_addr[ETH_ALEN - 1] = tries;
+ if (stmmac_perfect_check(priv, uc_addr))
+ break;
+ }
+
+ if (!tries)
+ return -EOPNOTSUPP;
ret = dev_mc_add(priv->dev, mc_addr);
if (ret)
- goto cleanup;
+ return ret;
attr.dst = mc_addr;
@@ -642,8 +698,6 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv)
cleanup:
dev_mc_del(priv->dev, mc_addr);
- __dev_uc_sync(priv->dev, stmmac_dummy_sync, NULL);
- stmmac_test_set_rx_mode(priv->dev);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index f9a9a9d82233..7d972e0fd2b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -321,8 +321,6 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
return -EINVAL;
if (!priv->dma_cap.av)
return -EOPNOTSUPP;
- if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
- return -EOPNOTSUPP;
mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 834afca3a019..9170572346b5 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -22,6 +22,7 @@ config TI_DAVINCI_EMAC
depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST
select TI_DAVINCI_MDIO
select PHYLIB
+ select GENERIC_ALLOCATOR
---help---
This driver supports TI's DaVinci Ethernet.
@@ -58,9 +59,24 @@ config TI_CPSW
To compile this driver as a module, choose M here: the module
will be called cpsw.
+config TI_CPSW_SWITCHDEV
+ tristate "TI CPSW Switch Support with switchdev"
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+ select NET_SWITCHDEV
+ select TI_DAVINCI_MDIO
+ select MFD_SYSCON
+ select REGMAP
+ select NET_DEVLINK
+ imply PHY_TI_GMII_SEL
+ help
+ This driver supports TI's CPSW Ethernet Switch.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cpsw_new.
+
config TI_CPTS
bool "TI Common Platform Time Sync (CPTS) Support"
- depends on TI_CPSW || TI_KEYSTONE_NETCP || COMPILE_TEST
+ depends on TI_CPSW || TI_KEYSTONE_NETCP || TI_CPSW_SWITCHDEV || COMPILE_TEST
depends on COMMON_CLK
depends on POSIX_TIMERS
---help---
@@ -72,7 +88,7 @@ config TI_CPTS
config TI_CPTS_MOD
tristate
depends on TI_CPTS
- default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y
+ default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y
select NET_PTP_CLASSIFY
imply PTP_1588_CLOCK
default m
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index ed12e1e5df2f..d34df8e5cf94 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -15,6 +15,8 @@ obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o
+obj-$(CONFIG_TI_CPSW_SWITCHDEV) += ti_cpsw_new.o
+ti_cpsw_new-y := cpsw_switchdev.o cpsw_new.o davinci_cpdma.o cpsw_ale.o cpsw_sl.o cpsw_priv.o cpsw_ethtool.o
obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
keystone_netcp-y := netcp_core.o cpsw_ale.o
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f298d714efd6..6ae4a72e6f43 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -34,7 +34,6 @@
#include <net/page_pool.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
-#include <linux/filter.h>
#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>
@@ -64,10 +63,6 @@ static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
-/* The buf includes headroom compatible with both skb and xdpf */
-#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
-#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long))
-
#define for_each_slave(priv, func, arg...) \
do { \
struct cpsw_slave *slave; \
@@ -82,10 +77,16 @@ MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
(func)(slave++, ##arg); \
} while (0)
-#define CPSW_XMETA_OFFSET ALIGN(sizeof(struct xdp_frame), sizeof(long))
+static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
+ struct cpsw_priv *priv)
+{
+ return cpsw->data.dual_emac ? priv->emac_port : cpsw->data.active_slave;
+}
-#define CPSW_XDP_CONSUMED 1
-#define CPSW_XDP_PASS 0
+static int cpsw_get_slave_port(u32 slave_num)
+{
+ return slave_num + 1;
+}
static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
__be16 proto, u16 vid);
@@ -332,218 +333,6 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
cpsw_del_mc_addr);
}
-void cpsw_intr_enable(struct cpsw_common *cpsw)
-{
- writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
- writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
-
- cpdma_ctlr_int_ctrl(cpsw->dma, true);
- return;
-}
-
-void cpsw_intr_disable(struct cpsw_common *cpsw)
-{
- writel_relaxed(0, &cpsw->wr_regs->tx_en);
- writel_relaxed(0, &cpsw->wr_regs->rx_en);
-
- cpdma_ctlr_int_ctrl(cpsw->dma, false);
- return;
-}
-
-static int cpsw_is_xdpf_handle(void *handle)
-{
- return (unsigned long)handle & BIT(0);
-}
-
-static void *cpsw_xdpf_to_handle(struct xdp_frame *xdpf)
-{
- return (void *)((unsigned long)xdpf | BIT(0));
-}
-
-static struct xdp_frame *cpsw_handle_to_xdpf(void *handle)
-{
- return (struct xdp_frame *)((unsigned long)handle & ~BIT(0));
-}
-
-struct __aligned(sizeof(long)) cpsw_meta_xdp {
- struct net_device *ndev;
- int ch;
-};
-
-void cpsw_tx_handler(void *token, int len, int status)
-{
- struct cpsw_meta_xdp *xmeta;
- struct xdp_frame *xdpf;
- struct net_device *ndev;
- struct netdev_queue *txq;
- struct sk_buff *skb;
- int ch;
-
- if (cpsw_is_xdpf_handle(token)) {
- xdpf = cpsw_handle_to_xdpf(token);
- xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
- ndev = xmeta->ndev;
- ch = xmeta->ch;
- xdp_return_frame(xdpf);
- } else {
- skb = token;
- ndev = skb->dev;
- ch = skb_get_queue_mapping(skb);
- cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
- dev_kfree_skb_any(skb);
- }
-
- /* Check whether the queue is stopped due to stalled tx dma, if the
- * queue is stopped then start the queue as we have free desc for tx
- */
- txq = netdev_get_tx_queue(ndev, ch);
- if (unlikely(netif_tx_queue_stopped(txq)))
- netif_tx_wake_queue(txq);
-
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += len;
-}
-
-static void cpsw_rx_vlan_encap(struct sk_buff *skb)
-{
- struct cpsw_priv *priv = netdev_priv(skb->dev);
- struct cpsw_common *cpsw = priv->cpsw;
- u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
- u16 vtag, vid, prio, pkt_type;
-
- /* Remove VLAN header encapsulation word */
- skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);
-
- pkt_type = (rx_vlan_encap_hdr >>
- CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
- CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
- /* Ignore unknown & Priority-tagged packets*/
- if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
- pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
- return;
-
- vid = (rx_vlan_encap_hdr >>
- CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
- VLAN_VID_MASK;
- /* Ignore vid 0 and pass packet as is */
- if (!vid)
- return;
- /* Ignore default vlans in dual mac mode */
- if (cpsw->data.dual_emac &&
- vid == cpsw->slaves[priv->emac_port].port_vlan)
- return;
-
- prio = (rx_vlan_encap_hdr >>
- CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
- CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;
-
- vtag = (prio << VLAN_PRIO_SHIFT) | vid;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
-
- /* strip vlan tag for VLAN-tagged packet */
- if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
- memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
- skb_pull(skb, VLAN_HLEN);
- }
-}
-
-static int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
- struct page *page)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_meta_xdp *xmeta;
- struct cpdma_chan *txch;
- dma_addr_t dma;
- int ret, port;
-
- xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
- xmeta->ndev = priv->ndev;
- xmeta->ch = 0;
- txch = cpsw->txv[0].ch;
-
- port = priv->emac_port + cpsw->data.dual_emac;
- if (page) {
- dma = page_pool_get_dma_addr(page);
- dma += xdpf->headroom + sizeof(struct xdp_frame);
- ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
- dma, xdpf->len, port);
- } else {
- if (sizeof(*xmeta) > xdpf->headroom) {
- xdp_return_frame_rx_napi(xdpf);
- return -EINVAL;
- }
-
- ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
- xdpf->data, xdpf->len, port);
- }
-
- if (ret) {
- priv->ndev->stats.tx_dropped++;
- xdp_return_frame_rx_napi(xdpf);
- }
-
- return ret;
-}
-
-static int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
- struct page *page)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct net_device *ndev = priv->ndev;
- int ret = CPSW_XDP_CONSUMED;
- struct xdp_frame *xdpf;
- struct bpf_prog *prog;
- u32 act;
-
- rcu_read_lock();
-
- prog = READ_ONCE(priv->xdp_prog);
- if (!prog) {
- ret = CPSW_XDP_PASS;
- goto out;
- }
-
- act = bpf_prog_run_xdp(prog, xdp);
- switch (act) {
- case XDP_PASS:
- ret = CPSW_XDP_PASS;
- break;
- case XDP_TX:
- xdpf = convert_to_xdp_frame(xdp);
- if (unlikely(!xdpf))
- goto drop;
-
- cpsw_xdp_tx_frame(priv, xdpf, page);
- break;
- case XDP_REDIRECT:
- if (xdp_do_redirect(ndev, xdp, prog))
- goto drop;
-
- /* Have to flush here, per packet, instead of doing it in bulk
- * at the end of the napi handler. The RX devices on this
- * particular hardware is sharing a common queue, so the
- * incoming device might change per packet.
- */
- xdp_do_flush_map();
- break;
- default:
- bpf_warn_invalid_xdp_action(act);
- /* fall through */
- case XDP_ABORTED:
- trace_xdp_exception(ndev, prog, act);
- /* fall through -- handle aborts by dropping packet */
- case XDP_DROP:
- goto drop;
- }
-out:
- rcu_read_unlock();
- return ret;
-drop:
- rcu_read_unlock();
- page_pool_recycle_direct(cpsw->page_pool[ch], page);
- return ret;
-}
-
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
len += CPSW_HEADROOM;
@@ -552,123 +341,6 @@ static unsigned int cpsw_rxbuf_total_len(unsigned int len)
return SKB_DATA_ALIGN(len);
}
-static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
- int size)
-{
- struct page_pool_params pp_params;
- struct page_pool *pool;
-
- pp_params.order = 0;
- pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = size;
- pp_params.nid = NUMA_NO_NODE;
- pp_params.dma_dir = DMA_BIDIRECTIONAL;
- pp_params.dev = cpsw->dev;
-
- pool = page_pool_create(&pp_params);
- if (IS_ERR(pool))
- dev_err(cpsw->dev, "cannot create rx page pool\n");
-
- return pool;
-}
-
-static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct xdp_rxq_info *rxq;
- struct page_pool *pool;
- int ret;
-
- pool = cpsw->page_pool[ch];
- rxq = &priv->xdp_rxq[ch];
-
- ret = xdp_rxq_info_reg(rxq, priv->ndev, ch);
- if (ret)
- return ret;
-
- ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
- if (ret)
- xdp_rxq_info_unreg(rxq);
-
- return ret;
-}
-
-static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
-{
- struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];
-
- if (!xdp_rxq_info_is_reg(rxq))
- return;
-
- xdp_rxq_info_unreg(rxq);
-}
-
-static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
-{
- struct page_pool *pool;
- int ret = 0, pool_size;
-
- pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
- pool = cpsw_create_page_pool(cpsw, pool_size);
- if (IS_ERR(pool))
- ret = PTR_ERR(pool);
- else
- cpsw->page_pool[ch] = pool;
-
- return ret;
-}
-
-void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
-{
- struct net_device *ndev;
- int i, ch;
-
- for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
- for (i = 0; i < cpsw->data.slaves; i++) {
- ndev = cpsw->slaves[i].ndev;
- if (!ndev)
- continue;
-
- cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
- }
-
- page_pool_destroy(cpsw->page_pool[ch]);
- cpsw->page_pool[ch] = NULL;
- }
-}
-
-int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
-{
- struct net_device *ndev;
- int i, ch, ret;
-
- for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
- ret = cpsw_create_rx_pool(cpsw, ch);
- if (ret)
- goto err_cleanup;
-
- /* using same page pool is allowed as no running rx handlers
- * simultaneously for both ndevs
- */
- for (i = 0; i < cpsw->data.slaves; i++) {
- ndev = cpsw->slaves[i].ndev;
- if (!ndev)
- continue;
-
- ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
- if (ret)
- goto err_cleanup;
- }
- }
-
- return 0;
-
-err_cleanup:
- cpsw_destroy_xdp_rxqs(cpsw);
-
- return ret;
-}
-
static void cpsw_rx_handler(void *token, int len, int status)
{
struct page *new_page, *page = token;
@@ -735,7 +407,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
xdp.data_hard_start = pa;
xdp.rxq = &priv->xdp_rxq[ch];
- ret = cpsw_run_xdp(priv, ch, &xdp, page);
+ port = priv->emac_port + cpsw->data.dual_emac;
+ ret = cpsw_run_xdp(priv, ch, &xdp, page, port);
if (ret != CPSW_XDP_PASS)
goto requeue;
@@ -785,274 +458,6 @@ requeue:
}
}
-void cpsw_split_res(struct cpsw_common *cpsw)
-{
- u32 consumed_rate = 0, bigest_rate = 0;
- struct cpsw_vector *txv = cpsw->txv;
- int i, ch_weight, rlim_ch_num = 0;
- int budget, bigest_rate_ch = 0;
- u32 ch_rate, max_rate;
- int ch_budget = 0;
-
- for (i = 0; i < cpsw->tx_ch_num; i++) {
- ch_rate = cpdma_chan_get_rate(txv[i].ch);
- if (!ch_rate)
- continue;
-
- rlim_ch_num++;
- consumed_rate += ch_rate;
- }
-
- if (cpsw->tx_ch_num == rlim_ch_num) {
- max_rate = consumed_rate;
- } else if (!rlim_ch_num) {
- ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
- bigest_rate = 0;
- max_rate = consumed_rate;
- } else {
- max_rate = cpsw->speed * 1000;
-
- /* if max_rate is less then expected due to reduced link speed,
- * split proportionally according next potential max speed
- */
- if (max_rate < consumed_rate)
- max_rate *= 10;
-
- if (max_rate < consumed_rate)
- max_rate *= 10;
-
- ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
- ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
- (cpsw->tx_ch_num - rlim_ch_num);
- bigest_rate = (max_rate - consumed_rate) /
- (cpsw->tx_ch_num - rlim_ch_num);
- }
-
- /* split tx weight/budget */
- budget = CPSW_POLL_WEIGHT;
- for (i = 0; i < cpsw->tx_ch_num; i++) {
- ch_rate = cpdma_chan_get_rate(txv[i].ch);
- if (ch_rate) {
- txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
- if (!txv[i].budget)
- txv[i].budget++;
- if (ch_rate > bigest_rate) {
- bigest_rate_ch = i;
- bigest_rate = ch_rate;
- }
-
- ch_weight = (ch_rate * 100) / max_rate;
- if (!ch_weight)
- ch_weight++;
- cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
- } else {
- txv[i].budget = ch_budget;
- if (!bigest_rate_ch)
- bigest_rate_ch = i;
- cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
- }
-
- budget -= txv[i].budget;
- }
-
- if (budget)
- txv[bigest_rate_ch].budget += budget;
-
- /* split rx budget */
- budget = CPSW_POLL_WEIGHT;
- ch_budget = budget / cpsw->rx_ch_num;
- for (i = 0; i < cpsw->rx_ch_num; i++) {
- cpsw->rxv[i].budget = ch_budget;
- budget -= ch_budget;
- }
-
- if (budget)
- cpsw->rxv[0].budget += budget;
-}
-
-static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
-{
- struct cpsw_common *cpsw = dev_id;
-
- writel(0, &cpsw->wr_regs->tx_en);
- cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
-
- if (cpsw->quirk_irq) {
- disable_irq_nosync(cpsw->irqs_table[1]);
- cpsw->tx_irq_disabled = true;
- }
-
- napi_schedule(&cpsw->napi_tx);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
-{
- struct cpsw_common *cpsw = dev_id;
-
- cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
- writel(0, &cpsw->wr_regs->rx_en);
-
- if (cpsw->quirk_irq) {
- disable_irq_nosync(cpsw->irqs_table[0]);
- cpsw->rx_irq_disabled = true;
- }
-
- napi_schedule(&cpsw->napi_rx);
- return IRQ_HANDLED;
-}
-
-static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
-{
- u32 ch_map;
- int num_tx, cur_budget, ch;
- struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
- struct cpsw_vector *txv;
-
- /* process every unprocessed channel */
- ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
- for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
- if (!(ch_map & 0x80))
- continue;
-
- txv = &cpsw->txv[ch];
- if (unlikely(txv->budget > budget - num_tx))
- cur_budget = budget - num_tx;
- else
- cur_budget = txv->budget;
-
- num_tx += cpdma_chan_process(txv->ch, cur_budget);
- if (num_tx >= budget)
- break;
- }
-
- if (num_tx < budget) {
- napi_complete(napi_tx);
- writel(0xff, &cpsw->wr_regs->tx_en);
- }
-
- return num_tx;
-}
-
-static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
-{
- struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
- int num_tx;
-
- num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
- if (num_tx < budget) {
- napi_complete(napi_tx);
- writel(0xff, &cpsw->wr_regs->tx_en);
- if (cpsw->tx_irq_disabled) {
- cpsw->tx_irq_disabled = false;
- enable_irq(cpsw->irqs_table[1]);
- }
- }
-
- return num_tx;
-}
-
-static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
-{
- u32 ch_map;
- int num_rx, cur_budget, ch;
- struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
- struct cpsw_vector *rxv;
-
- /* process every unprocessed channel */
- ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
- for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
- if (!(ch_map & 0x01))
- continue;
-
- rxv = &cpsw->rxv[ch];
- if (unlikely(rxv->budget > budget - num_rx))
- cur_budget = budget - num_rx;
- else
- cur_budget = rxv->budget;
-
- num_rx += cpdma_chan_process(rxv->ch, cur_budget);
- if (num_rx >= budget)
- break;
- }
-
- if (num_rx < budget) {
- napi_complete_done(napi_rx, num_rx);
- writel(0xff, &cpsw->wr_regs->rx_en);
- }
-
- return num_rx;
-}
-
-static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
-{
- struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
- int num_rx;
-
- num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
- if (num_rx < budget) {
- napi_complete_done(napi_rx, num_rx);
- writel(0xff, &cpsw->wr_regs->rx_en);
- if (cpsw->rx_irq_disabled) {
- cpsw->rx_irq_disabled = false;
- enable_irq(cpsw->irqs_table[0]);
- }
- }
-
- return num_rx;
-}
-
-static inline void soft_reset(const char *module, void __iomem *reg)
-{
- unsigned long timeout = jiffies + HZ;
-
- writel_relaxed(1, reg);
- do {
- cpu_relax();
- } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
-
- WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
-}
-
-static void cpsw_set_slave_mac(struct cpsw_slave *slave,
- struct cpsw_priv *priv)
-{
- slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
- slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
-}
-
-static bool cpsw_shp_is_off(struct cpsw_priv *priv)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- u32 shift, mask, val;
-
- val = readl_relaxed(&cpsw->regs->ptype);
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
- mask = 7 << shift;
- val = val & mask;
-
- return !val;
-}
-
-static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- u32 shift, mask, val;
-
- val = readl_relaxed(&cpsw->regs->ptype);
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
- mask = (1 << --fifo) << shift;
- val = on ? val | mask : val & ~mask;
-
- writel_relaxed(val, &cpsw->regs->ptype);
-}
-
static void _cpsw_adjust_link(struct cpsw_slave *slave,
struct cpsw_priv *priv, bool *link)
{
@@ -1118,44 +523,6 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
slave->mac_control = mac_control;
}
-static int cpsw_get_common_speed(struct cpsw_common *cpsw)
-{
- int i, speed;
-
- for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
- if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
- speed += cpsw->slaves[i].phy->speed;
-
- return speed;
-}
-
-static int cpsw_need_resplit(struct cpsw_common *cpsw)
-{
- int i, rlim_ch_num;
- int speed, ch_rate;
-
- /* re-split resources only in case speed was changed */
- speed = cpsw_get_common_speed(cpsw);
- if (speed == cpsw->speed || !speed)
- return 0;
-
- cpsw->speed = speed;
-
- for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
- ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
- if (!ch_rate)
- break;
-
- rlim_ch_num++;
- }
-
- /* cases not dependent on speed */
- if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
- return 0;
-
- return 1;
-}
-
static void cpsw_adjust_link(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1348,51 +715,6 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
}
}
-int cpsw_fill_rx_channels(struct cpsw_priv *priv)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_meta_xdp *xmeta;
- struct page_pool *pool;
- struct page *page;
- int ch_buf_num;
- int ch, i, ret;
- dma_addr_t dma;
-
- for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
- pool = cpsw->page_pool[ch];
- ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
- for (i = 0; i < ch_buf_num; i++) {
- page = page_pool_dev_alloc_pages(pool);
- if (!page) {
- cpsw_err(priv, ifup, "allocate rx page err\n");
- return -ENOMEM;
- }
-
- xmeta = page_address(page) + CPSW_XMETA_OFFSET;
- xmeta->ndev = priv->ndev;
- xmeta->ch = ch;
-
- dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
- ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
- page, dma,
- cpsw->rx_packet_max,
- 0);
- if (ret < 0) {
- cpsw_err(priv, ifup,
- "cannot submit page to channel %d rx, error %d\n",
- ch, ret);
- page_pool_recycle_direct(pool, page);
- return ret;
- }
- }
-
- cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
- ch, ch_buf_num);
- }
-
- return 0;
-}
-
static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
u32 slave_port;
@@ -1410,221 +732,6 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
cpsw_sl_ctl_reset(slave->mac_sl);
}
-static int cpsw_tc_to_fifo(int tc, int num_tc)
-{
- if (tc == num_tc - 1)
- return 0;
-
- return CPSW_FIFO_SHAPERS_NUM - tc;
-}
-
-static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- u32 val = 0, send_pct, shift;
- struct cpsw_slave *slave;
- int pct = 0, i;
-
- if (bw > priv->shp_cfg_speed * 1000)
- goto err;
-
- /* shaping has to stay enabled for highest fifos linearly
- * and fifo bw no more then interface can allow
- */
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- send_pct = slave_read(slave, SEND_PERCENT);
- for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
- if (!bw) {
- if (i >= fifo || !priv->fifo_bw[i])
- continue;
-
- dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
- continue;
- }
-
- if (!priv->fifo_bw[i] && i > fifo) {
- dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
- return -EINVAL;
- }
-
- shift = (i - 1) * 8;
- if (i == fifo) {
- send_pct &= ~(CPSW_PCT_MASK << shift);
- val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
- if (!val)
- val = 1;
-
- send_pct |= val << shift;
- pct += val;
- continue;
- }
-
- if (priv->fifo_bw[i])
- pct += (send_pct >> shift) & CPSW_PCT_MASK;
- }
-
- if (pct >= 100)
- goto err;
-
- slave_write(slave, send_pct, SEND_PERCENT);
- priv->fifo_bw[fifo] = bw;
-
- dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
- DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
-
- return 0;
-err:
- dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
- return -EINVAL;
-}
-
-static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- u32 tx_in_ctl_rg, val;
- int ret;
-
- ret = cpsw_set_fifo_bw(priv, fifo, bw);
- if (ret)
- return ret;
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
- CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
-
- if (!bw)
- cpsw_fifo_shp_on(priv, fifo, bw);
-
- val = slave_read(slave, tx_in_ctl_rg);
- if (cpsw_shp_is_off(priv)) {
- /* disable FIFOs rate limited queues */
- val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
-
- /* set type of FIFO queues to normal priority mode */
- val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
-
- /* set type of FIFO queues to be rate limited */
- if (bw)
- val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
- else
- priv->shp_cfg_speed = 0;
- }
-
- /* toggle a FIFO rate limited queue */
- if (bw)
- val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
- else
- val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
- slave_write(slave, val, tx_in_ctl_rg);
-
- /* FIFO transmit shape enable */
- cpsw_fifo_shp_on(priv, fifo, bw);
- return 0;
-}
-
-/* Defaults:
- * class A - prio 3
- * class B - prio 2
- * shaping for class A should be set first
- */
-static int cpsw_set_cbs(struct net_device *ndev,
- struct tc_cbs_qopt_offload *qopt)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- int prev_speed = 0;
- int tc, ret, fifo;
- u32 bw = 0;
-
- tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
-
- /* enable channels in backward order, as highest FIFOs must be rate
- * limited first and for compliance with CPDMA rate limited channels
- * that also used in bacward order. FIFO0 cannot be rate limited.
- */
- fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
- if (!fifo) {
- dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
- return -EINVAL;
- }
-
- /* do nothing, it's disabled anyway */
- if (!qopt->enable && !priv->fifo_bw[fifo])
- return 0;
-
- /* shapers can be set if link speed is known */
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- if (slave->phy && slave->phy->link) {
- if (priv->shp_cfg_speed &&
- priv->shp_cfg_speed != slave->phy->speed)
- prev_speed = priv->shp_cfg_speed;
-
- priv->shp_cfg_speed = slave->phy->speed;
- }
-
- if (!priv->shp_cfg_speed) {
- dev_err(priv->dev, "Link speed is not known");
- return -1;
- }
-
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
- return ret;
- }
-
- bw = qopt->enable ? qopt->idleslope : 0;
- ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
- if (ret) {
- priv->shp_cfg_speed = prev_speed;
- prev_speed = 0;
- }
-
- if (bw && prev_speed)
- dev_warn(priv->dev,
- "Speed was changed, CBS shaper speeds are changed!");
-
- pm_runtime_put_sync(cpsw->dev);
- return ret;
-}
-
-static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
-{
- int fifo, bw;
-
- for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
- bw = priv->fifo_bw[fifo];
- if (!bw)
- continue;
-
- cpsw_set_fifo_rlimit(priv, fifo, bw);
- }
-}
-
-static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- u32 tx_prio_map = 0;
- int i, tc, fifo;
- u32 tx_prio_rg;
-
- if (!priv->mqprio_hw)
- return;
-
- for (i = 0; i < 8; i++) {
- tc = netdev_get_prio_tc_map(priv->ndev, i);
- fifo = CPSW_FIFO_SHAPERS_NUM - tc;
- tx_prio_map |= fifo << (4 * i);
- }
-
- tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
- CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
-
- slave_write(slave, tx_prio_map, tx_prio_rg);
-}
-
static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
struct cpsw_priv *priv = arg;
@@ -1853,207 +960,6 @@ fail:
return NETDEV_TX_BUSY;
}
-#if IS_ENABLED(CONFIG_TI_CPTS)
-
-static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
- u32 ts_en, seq_id;
-
- if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
- slave_write(slave, 0, CPSW1_TS_CTL);
- return;
- }
-
- seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
- ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
-
- if (priv->tx_ts_enabled)
- ts_en |= CPSW_V1_TS_TX_EN;
-
- if (priv->rx_ts_enabled)
- ts_en |= CPSW_V1_TS_RX_EN;
-
- slave_write(slave, ts_en, CPSW1_TS_CTL);
- slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
-}
-
-static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
-{
- struct cpsw_slave *slave;
- struct cpsw_common *cpsw = priv->cpsw;
- u32 ctrl, mtype;
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
-
- ctrl = slave_read(slave, CPSW2_CONTROL);
- switch (cpsw->version) {
- case CPSW_VERSION_2:
- ctrl &= ~CTRL_V2_ALL_TS_MASK;
-
- if (priv->tx_ts_enabled)
- ctrl |= CTRL_V2_TX_TS_BITS;
-
- if (priv->rx_ts_enabled)
- ctrl |= CTRL_V2_RX_TS_BITS;
- break;
- case CPSW_VERSION_3:
- default:
- ctrl &= ~CTRL_V3_ALL_TS_MASK;
-
- if (priv->tx_ts_enabled)
- ctrl |= CTRL_V3_TX_TS_BITS;
-
- if (priv->rx_ts_enabled)
- ctrl |= CTRL_V3_RX_TS_BITS;
- break;
- }
-
- mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
-
- slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
- slave_write(slave, ctrl, CPSW2_CONTROL);
- writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
- writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
-}
-
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
-{
- struct cpsw_priv *priv = netdev_priv(dev);
- struct hwtstamp_config cfg;
- struct cpsw_common *cpsw = priv->cpsw;
-
- if (cpsw->version != CPSW_VERSION_1 &&
- cpsw->version != CPSW_VERSION_2 &&
- cpsw->version != CPSW_VERSION_3)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- /* reserved for future extensions */
- if (cfg.flags)
- return -EINVAL;
-
- if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
- return -ERANGE;
-
- switch (cfg.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- priv->rx_ts_enabled = 0;
- break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_NTP_ALL:
- return -ERANGE;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- break;
- default:
- return -ERANGE;
- }
-
- priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
-
- switch (cpsw->version) {
- case CPSW_VERSION_1:
- cpsw_hwtstamp_v1(priv);
- break;
- case CPSW_VERSION_2:
- case CPSW_VERSION_3:
- cpsw_hwtstamp_v2(priv);
- break;
- default:
- WARN_ON(1);
- }
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
-}
-
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(dev);
- struct cpsw_priv *priv = netdev_priv(dev);
- struct hwtstamp_config cfg;
-
- if (cpsw->version != CPSW_VERSION_1 &&
- cpsw->version != CPSW_VERSION_2 &&
- cpsw->version != CPSW_VERSION_3)
- return -EOPNOTSUPP;
-
- cfg.flags = 0;
- cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = priv->rx_ts_enabled;
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
-}
-#else
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
-{
- return -EOPNOTSUPP;
-}
-
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
-{
- return -EOPNOTSUPP;
-}
-#endif /*CONFIG_TI_CPTS*/
-
-static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
-{
- struct cpsw_priv *priv = netdev_priv(dev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (!netif_running(dev))
- return -EINVAL;
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_set(dev, req);
- case SIOCGHWTSTAMP:
- return cpsw_hwtstamp_get(dev, req);
- }
-
- if (!cpsw->slaves[slave_no].phy)
- return -EOPNOTSUPP;
- return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
-}
-
-static void cpsw_ndo_tx_timeout(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int ch;
-
- cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
- ndev->stats.tx_errors++;
- cpsw_intr_disable(cpsw);
- for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
- cpdma_chan_stop(cpsw->txv[ch].ch);
- cpdma_chan_start(cpsw->txv[ch].ch);
- }
-
- cpsw_intr_enable(cpsw);
- netif_trans_update(ndev);
- netif_tx_wake_all_queues(ndev);
-}
-
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -2215,168 +1121,13 @@ err:
return ret;
}
-static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- u32 min_rate;
- u32 ch_rate;
- int i, ret;
-
- ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
- if (ch_rate == rate)
- return 0;
-
- ch_rate = rate * 1000;
- min_rate = cpdma_chan_get_min_rate(cpsw->dma);
- if ((ch_rate < min_rate && ch_rate)) {
- dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
- min_rate);
- return -EINVAL;
- }
-
- if (rate > cpsw->speed) {
- dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
- return -EINVAL;
- }
-
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
- return ret;
- }
-
- ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
- pm_runtime_put(cpsw->dev);
-
- if (ret)
- return ret;
-
- /* update rates for slaves tx queues */
- for (i = 0; i < cpsw->data.slaves; i++) {
- slave = &cpsw->slaves[i];
- if (!slave->ndev)
- continue;
-
- netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
- }
-
- cpsw_split_res(cpsw);
- return ret;
-}
-
-static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
-{
- struct tc_mqprio_qopt_offload *mqprio = type_data;
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int fifo, num_tc, count, offset;
- struct cpsw_slave *slave;
- u32 tx_prio_map = 0;
- int i, tc, ret;
-
- num_tc = mqprio->qopt.num_tc;
- if (num_tc > CPSW_TC_NUM)
- return -EINVAL;
-
- if (mqprio->mode != TC_MQPRIO_MODE_DCB)
- return -EINVAL;
-
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
- return ret;
- }
-
- if (num_tc) {
- for (i = 0; i < 8; i++) {
- tc = mqprio->qopt.prio_tc_map[i];
- fifo = cpsw_tc_to_fifo(tc, num_tc);
- tx_prio_map |= fifo << (4 * i);
- }
-
- netdev_set_num_tc(ndev, num_tc);
- for (i = 0; i < num_tc; i++) {
- count = mqprio->qopt.count[i];
- offset = mqprio->qopt.offset[i];
- netdev_set_tc_queue(ndev, i, count, offset);
- }
- }
-
- if (!mqprio->qopt.hw) {
- /* restore default configuration */
- netdev_reset_tc(ndev);
- tx_prio_map = TX_PRIORITY_MAPPING;
- }
-
- priv->mqprio_hw = mqprio->qopt.hw;
-
- offset = cpsw->version == CPSW_VERSION_1 ?
- CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- slave_write(slave, tx_prio_map, offset);
-
- pm_runtime_put_sync(cpsw->dev);
-
- return 0;
-}
-
-static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
-{
- switch (type) {
- case TC_SETUP_QDISC_CBS:
- return cpsw_set_cbs(ndev, type_data);
-
- case TC_SETUP_QDISC_MQPRIO:
- return cpsw_set_mqprio(ndev, type_data);
-
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
-{
- struct bpf_prog *prog = bpf->prog;
-
- if (!priv->xdpi.prog && !prog)
- return 0;
-
- if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
- return -EBUSY;
-
- WRITE_ONCE(priv->xdp_prog, prog);
-
- xdp_attachment_setup(&priv->xdpi, bpf);
-
- return 0;
-}
-
-static int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
-
- switch (bpf->command) {
- case XDP_SETUP_PROG:
- return cpsw_xdp_prog_setup(priv, bpf);
-
- case XDP_QUERY_PROG:
- return xdp_attachment_query(&priv->xdpi, bpf);
-
- default:
- return -EINVAL;
- }
-}
-
static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
struct xdp_frame **frames, u32 flags)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
struct xdp_frame *xdpf;
- int i, drops = 0;
+ int i, drops = 0, port;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -2389,7 +1140,8 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
continue;
}
- if (cpsw_xdp_tx_frame(priv, xdpf, NULL))
+ port = priv->emac_port + cpsw->data.dual_emac;
+ if (cpsw_xdp_tx_frame(priv, xdpf, NULL, port))
drops++;
}
@@ -2619,11 +1371,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
i);
goto no_phy_slave;
}
- slave_data->phy_if = of_get_phy_mode(slave_node);
- if (slave_data->phy_if < 0) {
+ ret = of_get_phy_mode(slave_node, &slave_data->phy_if);
+ if (ret) {
dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
i);
- ret = slave_data->phy_if;
goto err_node_put;
}
@@ -2776,6 +1527,8 @@ static int cpsw_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, cpsw);
+ cpsw_slave_index = cpsw_slave_index_priv;
+
cpsw->dev = dev;
mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 84025dcc78d5..929f3d3354e3 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -5,6 +5,8 @@
* Copyright (C) 2012 Texas Instruments
*
*/
+#include <linux/bitmap.h>
+#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -382,6 +384,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int mcast_members;
int idx;
idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
@@ -390,11 +393,15 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
cpsw_ale_read(ale, idx, ale_entry);
- if (port_mask)
- cpsw_ale_set_port_mask(ale_entry, port_mask,
+ if (port_mask) {
+ mcast_members = cpsw_ale_get_port_mask(ale_entry,
+ ale->port_mask_bits);
+ mcast_members &= ~port_mask;
+ cpsw_ale_set_port_mask(ale_entry, mcast_members,
ale->port_mask_bits);
- else
+ } else {
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
cpsw_ale_write(ale, idx, ale_entry);
return 0;
@@ -415,7 +422,18 @@ static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry,
writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
}
-int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+static void cpsw_ale_set_vlan_untag(struct cpsw_ale *ale, u32 *ale_entry,
+ u16 vid, int untag_mask)
+{
+ cpsw_ale_set_vlan_untag_force(ale_entry,
+ untag_mask, ale->vlan_field_bits);
+ if (untag_mask & ALE_PORT_HOST)
+ bitmap_set(ale->p0_untag_vid_mask, vid, 1);
+ else
+ bitmap_clear(ale->p0_untag_vid_mask, vid, 1);
+}
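+
+/* cpsw_ale_set_vlan_untag() keeps the p0_untag_vid_mask bitmap in sync
+ * with the entry's untag-force field, so the RX path can check whether
+ * host port (P0) egress for a VID is untagged via
+ * cpsw_ale_get_vlan_p0_untag() without re-reading the ALE table.
+ */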
+
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag,
int reg_mcast, int unreg_mcast)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -427,8 +445,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
cpsw_ale_set_vlan_id(ale_entry, vid);
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag);
- cpsw_ale_set_vlan_untag_force(ale_entry, untag, ale->vlan_field_bits);
if (!ale->params.nu_switch_ale) {
cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast,
ale->vlan_field_bits);
@@ -437,7 +455,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
} else {
cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast);
}
- cpsw_ale_set_vlan_member_list(ale_entry, port, ale->vlan_field_bits);
+ cpsw_ale_set_vlan_member_list(ale_entry, port_mask,
+ ale->vlan_field_bits);
if (idx < 0)
idx = cpsw_ale_match_free(ale);
@@ -450,6 +469,41 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
return 0;
}
+static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
+ u16 vid, int port_mask)
+{
+ int reg_mcast, unreg_mcast;
+ int members, untag;
+
+ members = cpsw_ale_get_vlan_member_list(ale_entry,
+ ale->vlan_field_bits);
+ members &= ~port_mask;
+
+ untag = cpsw_ale_get_vlan_untag_force(ale_entry,
+ ale->vlan_field_bits);
+ reg_mcast = cpsw_ale_get_vlan_reg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ untag &= members;
+ reg_mcast &= members;
+ unreg_mcast &= members;
+
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag);
+
+ if (!ale->params.nu_switch_ale) {
+ cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast,
+ ale->vlan_field_bits);
+ cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
+ ale->vlan_field_bits);
+ } else {
+ cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast,
+ unreg_mcast);
+ }
+ cpsw_ale_set_vlan_member_list(ale_entry, members,
+ ale->vlan_field_bits);
+}
+
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -461,16 +515,83 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
cpsw_ale_read(ale, idx, ale_entry);
- if (port_mask)
- cpsw_ale_set_vlan_member_list(ale_entry, port_mask,
- ale->vlan_field_bits);
- else
+ if (port_mask) {
+ cpsw_ale_del_vlan_modify(ale, ale_entry, vid, port_mask);
+ } else {
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0);
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
cpsw_ale_write(ale, idx, ale_entry);
+
return 0;
}
+int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
+ int untag_mask, int reg_mask, int unreg_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int reg_mcast_members, unreg_mcast_members;
+ int vlan_members, untag_members;
+ int idx, ret = 0;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx >= 0)
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ vlan_members = cpsw_ale_get_vlan_member_list(ale_entry,
+ ale->vlan_field_bits);
+ reg_mcast_members = cpsw_ale_get_vlan_reg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ unreg_mcast_members =
+ cpsw_ale_get_vlan_unreg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ untag_members = cpsw_ale_get_vlan_untag_force(ale_entry,
+ ale->vlan_field_bits);
+
+ vlan_members |= port_mask;
+ untag_members = (untag_members & ~port_mask) | untag_mask;
+ reg_mcast_members = (reg_mcast_members & ~port_mask) | reg_mask;
+ unreg_mcast_members = (unreg_mcast_members & ~port_mask) | unreg_mask;
+
+ ret = cpsw_ale_add_vlan(ale, vid, vlan_members, untag_members,
+ reg_mcast_members, unreg_mcast_members);
+ if (ret) {
+ dev_err(ale->params.dev, "Unable to add vlan\n");
+ return ret;
+ }
+ dev_dbg(ale->params.dev, "port mask 0x%x untag 0x%x\n", vlan_members,
+ untag_mask);
+
+ return ret;
+}
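+
+/* Read-modify-write sketch for cpsw_ale_vlan_add_modify(): for an
+ * existing entry whose member list is 0x5 (ports 0 and 2), a call with
+ * port_mask = 0x2 and untag_mask = 0x2 yields members 0x7, while the
+ * untag/mcast bits of the other ports are preserved.
+ */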
+
+void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
+ bool add)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int unreg_members = 0;
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_VLAN)
+ continue;
+
+ unreg_members =
+ cpsw_ale_get_vlan_unreg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ if (add)
+ unreg_members |= unreg_mcast_mask;
+ else
+ unreg_members &= ~unreg_mcast_mask;
+ cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_members,
+ ale->vlan_field_bits);
+ cpsw_ale_write(ale, idx, ale_entry);
+ }
+}
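+
+/* cpsw_ale_set_unreg_mcast() walks every VLAN entry in the ALE table
+ * and adds or removes unreg_mcast_mask from its unregistered-multicast
+ * member list, e.g. to start or stop flooding unknown multicast toward
+ * the host port.
+ */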
+
void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port)
{
u32 ale_entry[ALE_ENTRY_WORDS];
@@ -779,6 +900,7 @@ void cpsw_ale_start(struct cpsw_ale *ale)
void cpsw_ale_stop(struct cpsw_ale *ale)
{
del_timer_sync(&ale->timer);
+ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
}
@@ -791,6 +913,13 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
if (!ale)
return NULL;
+ ale->p0_untag_vid_mask =
+ devm_kmalloc_array(params->dev, BITS_TO_LONGS(VLAN_N_VID),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!ale->p0_untag_vid_mask)
+ return ERR_PTR(-ENOMEM);
+
ale->params = *params;
ale->ageout = ale->params.ale_ageout * HZ;
@@ -862,6 +991,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
}
+ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
return ale;
}
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 370df254eb12..70d0955c2652 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -35,6 +35,7 @@ struct cpsw_ale {
u32 port_mask_bits;
u32 port_num_bits;
u32 vlan_field_bits;
+ unsigned long *p0_untag_vid_mask;
};
enum cpsw_ale_control {
@@ -115,4 +116,14 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
int control, int value);
void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data);
+static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid)
+{
+ return test_bit(vid, ale->p0_untag_vid_mask);
+}
+
+int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
+ int untag_mask, int reg_mcast, int unreg_mcast);
+void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
+ bool add);
+
#endif
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
new file mode 100644
index 000000000000..71215db7934b
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -0,0 +1,2048 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2019 Texas Instruments
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/if_vlan.h>
+#include <linux/kmemleak.h>
+#include <linux/sys_soc.h>
+
+#include <net/page_pool.h>
+#include <net/pkt_cls.h>
+#include <net/devlink.h>
+
+#include "cpsw.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "cpsw_sl.h"
+#include "cpsw_switchdev.h"
+#include "cpts.h"
+#include "davinci_cpdma.h"
+
+#include <net/pkt_sched.h>
+
+static int debug_level;
+static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT;
+static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
+static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
+
+struct cpsw_devlink {
+ struct cpsw_common *cpsw;
+};
+
+enum cpsw_devlink_param_id {
+ CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ CPSW_DL_PARAM_SWITCH_MODE,
+ CPSW_DL_PARAM_ALE_BYPASS,
+};
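+
+/* Driver-specific devlink parameter IDs are allocated after
+ * DEVLINK_PARAM_GENERIC_ID_MAX so they cannot clash with the generic
+ * parameters. Illustrative userspace usage (the device and parameter
+ * names below are examples):
+ *   devlink dev param set platform/48484000.switch \
+ *           name switch_mode value true cmode runtime
+ */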
+
+/* The cpsw_common argument is not needed here; it is kept for
+ * compatibility with the old driver's cpsw_slave_index() callback
+ */
+static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
+ struct cpsw_priv *priv)
+{
+ if (priv->emac_port == HOST_PORT_NUM)
+ return -1;
+
+ return priv->emac_port - 1;
+}
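+
+/* With the new driver's 1-based port numbering, emac_port == 1 maps to
+ * cpsw->slaves[0]; the host port (HOST_PORT_NUM) owns no slave entry
+ * and yields -1.
+ */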
+
+static bool cpsw_is_switch_en(struct cpsw_common *cpsw)
+{
+ return !cpsw->data.dual_emac;
+}
+
+static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ bool enable_uni = false;
+ int i;
+
+ if (cpsw_is_switch_en(cpsw))
+ return;
+
+ /* Enabling promiscuous mode on one interface effectively
+ * enables it for both, as the two interfaces share the
+ * same hardware resource.
+ */
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev &&
+ (cpsw->slaves[i].ndev->flags & IFF_PROMISC))
+ enable_uni = true;
+
+ if (!enable && enable_uni) {
+ enable = enable_uni;
+ dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
+ }
+
+ if (enable) {
+ /* Enable unknown unicast, reg/unreg mcast */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
+ ALE_P0_UNI_FLOOD, 1);
+
+ dev_dbg(cpsw->dev, "promiscuity enabled\n");
+ } else {
+ /* Disable unknown unicast */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
+ ALE_P0_UNI_FLOOD, 0);
+ dev_dbg(cpsw->dev, "promiscuity disabled\n");
+ }
+}
+
+/**
+ * cpsw_set_mc - add a multicast entry to the ALE table if not yet
+ * present, or delete an existing one
+ * @ndev: device to sync
+ * @addr: address to be added or deleted
+ * @vid: vlan id, if vid < 0 set/unset address for real device
+ * @add: add address if the flag is set or remove otherwise
+ */
+static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
+ int vid, int add)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int mask, flags, ret, slave_no;
+
+ slave_no = cpsw_slave_index(cpsw, priv);
+ if (vid < 0)
+ vid = cpsw->slaves[slave_no].port_vlan;
+
+ mask = ALE_PORT_HOST;
+ flags = vid ? ALE_VLAN : 0;
+
+ if (add)
+ ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
+ else
+ ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+
+ return ret;
+}
+
+static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0, ret = 0;
+
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
+ }
+
+ if (found)
+ sync_ctx->consumed++;
+
+ if (sync_ctx->flush) {
+ if (!found)
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+ }
+
+ if (found)
+ ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
+
+ return ret;
+}
+
+static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+ int ret;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 0;
+
+ ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num && !ret)
+ ret = cpsw_set_mc(ndev, addr, -1, 1);
+
+ return ret;
+}
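+
+/* cpsw_add_mc_addr(): sync_ctx.consumed counts how many VLAN uppers
+ * claimed the address; only when fewer than @num references were
+ * consumed is the address also programmed for the real device
+ * (vid == -1).
+ */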
+
+static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 1;
+
+ vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed == num)
+ cpsw_set_mc(ndev, addr, -1, 0);
+
+ return 0;
+}
+
+static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0;
+
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
+ }
+
+ if (!found)
+ return 0;
+
+ sync_ctx->consumed++;
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+}
+
+static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.consumed = 0;
+
+ vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num)
+ cpsw_set_mc(ndev, addr, -1, 0);
+
+ return 0;
+}
+
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ cpsw_set_promiscious(ndev, true);
+ cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port);
+ return;
+ }
+
+ /* Disable promiscuous mode */
+ cpsw_set_promiscious(ndev, false);
+
+ /* Restore allmulti on vlans if necessary */
+ cpsw_ale_set_allmulti(cpsw->ale,
+ ndev->flags & IFF_ALLMULTI, priv->emac_port);
+
+ /* add/remove mcast address either for real netdev or for vlan */
+ __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
+ cpsw_del_mc_addr);
+}
+
+static unsigned int cpsw_rxbuf_total_len(unsigned int len)
+{
+ len += CPSW_HEADROOM;
+ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ return SKB_DATA_ALIGN(len);
+}
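+
+/* cpsw_rxbuf_total_len(): the returned size is the packet length plus
+ * CPSW_HEADROOM plus aligned room for struct skb_shared_info, i.e. the
+ * page layout that build_skb() expects in cpsw_rx_handler().
+ */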
+
+static void cpsw_rx_handler(void *token, int len, int status)
+{
+ struct page *new_page, *page = token;
+ void *pa = page_address(page);
+ int headroom = CPSW_HEADROOM;
+ struct cpsw_meta_xdp *xmeta;
+ struct cpsw_common *cpsw;
+ struct net_device *ndev;
+ int port, ch, pkt_size;
+ struct cpsw_priv *priv;
+ struct page_pool *pool;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+ int ret = 0;
+ dma_addr_t dma;
+
+ xmeta = pa + CPSW_XMETA_OFFSET;
+ cpsw = ndev_to_cpsw(xmeta->ndev);
+ ndev = xmeta->ndev;
+ pkt_size = cpsw->rx_packet_max;
+ ch = xmeta->ch;
+
+ if (status >= 0) {
+ port = CPDMA_RX_SOURCE_PORT(status);
+ if (port)
+ ndev = cpsw->slaves[--port].ndev;
+ }
+
+ priv = netdev_priv(ndev);
+ pool = cpsw->page_pool[ch];
+
+ if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
+ /* In dual emac mode check for all interfaces */
+ if (cpsw->usage_count && status >= 0) {
+ /* The received packet is for an interface that is
+ * already down while the other interface is still up
+ * and running. Instead of freeing the page, which
+ * would shrink the number of rx descriptors in the
+ * DMA engine, requeue it back to cpdma.
+ */
+ new_page = page;
+ goto requeue;
+ }
+
+ /* the interface is going down, pages are purged */
+ page_pool_recycle_direct(pool, page);
+ return;
+ }
+
+ new_page = page_pool_dev_alloc_pages(pool);
+ if (unlikely(!new_page)) {
+ new_page = page;
+ ndev->stats.rx_dropped++;
+ goto requeue;
+ }
+
+ if (priv->xdp_prog) {
+ if (status & CPDMA_RX_VLAN_ENCAP) {
+ xdp.data = pa + CPSW_HEADROOM +
+ CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ xdp.data_end = xdp.data + len -
+ CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ } else {
+ xdp.data = pa + CPSW_HEADROOM;
+ xdp.data_end = xdp.data + len;
+ }
+
+ xdp_set_data_meta_invalid(&xdp);
+
+ xdp.data_hard_start = pa;
+ xdp.rxq = &priv->xdp_rxq[ch];
+
+ ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
+ if (ret != CPSW_XDP_PASS)
+ goto requeue;
+
+ /* XDP prog might have changed packet data and boundaries */
+ len = xdp.data_end - xdp.data;
+ headroom = xdp.data - xdp.data_hard_start;
+
+ /* XDP prog can modify vlan tag, so can't use encap header */
+ status &= ~CPDMA_RX_VLAN_ENCAP;
+ }
+
+ /* pass skb to netstack if no XDP prog or returned XDP_PASS */
+ skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(pool, page);
+ goto requeue;
+ }
+
+ skb->offload_fwd_mark = priv->offload_fwd_mark;
+ skb_reserve(skb, headroom);
+ skb_put(skb, len);
+ skb->dev = ndev;
+ if (status & CPDMA_RX_VLAN_ENCAP)
+ cpsw_rx_vlan_encap(skb);
+ if (priv->rx_ts_enabled)
+ cpts_rx_timestamp(cpsw->cpts, skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ /* unmap the page, since the netstack does no skb page recycling */
+ page_pool_release_page(pool, page);
+ netif_receive_skb(skb);
+
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
+
+requeue:
+ xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
+ xmeta->ndev = ndev;
+ xmeta->ch = ch;
+
+ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
+ ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
+ pkt_size, 0);
+ if (ret < 0) {
+ WARN_ON(ret == -ENOMEM);
+ page_pool_recycle_direct(pool, new_page);
+ }
+}
+
+static int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
+ unsigned short vid)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ int unreg_mcast_mask = 0;
+ int mcast_mask;
+ u32 port_mask;
+ int ret;
+
+ port_mask = (1 << priv->emac_port) | ALE_PORT_HOST;
+
+ mcast_mask = ALE_PORT_HOST;
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = mcast_mask;
+
+ ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
+ unreg_mcast_mask);
+ if (ret != 0)
+ return ret;
+
+ ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ if (ret != 0)
+ goto clean_vid;
+
+ ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ mcast_mask, ALE_VLAN, vid, 0);
+ if (ret != 0)
+ goto clean_vlan_ucast;
+ return 0;
+
+clean_vlan_ucast:
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+clean_vid:
+ cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+ return ret;
+}
+
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret, i;
+
+ if (cpsw_is_switch_en(cpsw)) {
+ dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n");
+ return 0;
+ }
+
+ if (vid == cpsw->data.default_vlan)
+ return 0;
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ /* In dual EMAC mode, a reserved VLAN id must not be used to
+ * create a VLAN interface, as that would break the dual
+ * EMAC port separation
+ */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (cpsw->slaves[i].ndev &&
+ vid == cpsw->slaves[i].port_vlan) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+ ret = cpsw_add_vlan_ale_entry(priv, vid);
+err:
+ pm_runtime_put(cpsw->dev);
+ return ret;
+}
+
+static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
+{
+ struct cpsw_priv *priv = arg;
+
+ if (!vdev || !vid)
+ return 0;
+
+ cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
+ return 0;
+}
+
+/* restore resources after port reset */
+static void cpsw_restore(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ /* restore vlan configurations */
+ vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
+
+ /* restore MQPRIO offload */
+ cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv);
+
+ /* restore CBS offload */
+ cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);
+}
+
+static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
+{
+ char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0};
+
+ cpsw_ale_add_mcast(cpsw->ale, stpa,
+ ALE_PORT_HOST, ALE_SUPER, 0,
+ ALE_MCAST_BLOCK_LEARN_FWD);
+}
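+
+/* 01:80:c2:00:00:00 is the IEEE 802.1D bridge group address. The entry
+ * is super, host-port only, with the block/learn-fwd state, so STP
+ * BPDUs reach the kernel bridge instead of being switched in hardware.
+ */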
+
+static void cpsw_init_host_port_switch(struct cpsw_common *cpsw)
+{
+ int vlan = cpsw->data.default_vlan;
+
+ writel(CPSW_FIFO_NORMAL_MODE, &cpsw->host_port_regs->tx_in_ctl);
+
+ writel(vlan, &cpsw->host_port_regs->port_vlan);
+
+ cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
+ ALE_ALL_PORTS, ALE_ALL_PORTS,
+ ALE_PORT_1 | ALE_PORT_2);
+
+ cpsw_init_stp_ale_entry(cpsw);
+
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
+ dev_dbg(cpsw->dev, "Set P0_UNI_FLOOD\n");
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
+}
+
+static void cpsw_init_host_port_dual_mac(struct cpsw_common *cpsw)
+{
+ int vlan = cpsw->data.default_vlan;
+
+ writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl);
+
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
+ dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n");
+
+ writel(vlan, &cpsw->host_port_regs->port_vlan);
+
+ cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
+ /* learning makes no sense in dual_mac mode */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
+}
+
+static void cpsw_init_host_port(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 control_reg;
+
+ /* soft reset the controller and initialize ale */
+ soft_reset("cpsw", &cpsw->regs->soft_reset);
+ cpsw_ale_start(cpsw->ale);
+
+ /* switch to vlan aware mode */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
+ CPSW_ALE_VLAN_AWARE);
+ control_reg = readl(&cpsw->regs->control);
+ control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
+ writel(control_reg, &cpsw->regs->control);
+
+ /* setup host port priority mapping */
+ writel_relaxed(CPDMA_TX_PRIORITY_MAP,
+ &cpsw->host_port_regs->cpdma_tx_pri_map);
+ writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
+
+ /* disable priority elevation */
+ writel_relaxed(0, &cpsw->regs->ptype);
+
+ /* enable statistics collection on all ports */
+ writel_relaxed(0x7, &cpsw->regs->stat_port_en);
+
+ /* Enable internal fifo flow control */
+ writel(0x7, &cpsw->regs->flow_control);
+
+ if (cpsw_is_switch_en(cpsw))
+ cpsw_init_host_port_switch(cpsw);
+ else
+ cpsw_init_host_port_dual_mac(cpsw);
+
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+}
+
+static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv,
+ struct cpsw_slave *slave)
+{
+ u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 reg;
+
+ reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ CPSW2_PORT_VLAN;
+ slave_write(slave, slave->port_vlan, reg);
+
+ cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
+ port_mask, port_mask, 0);
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ ALE_PORT_HOST, ALE_VLAN, slave->port_vlan,
+ ALE_MCAST_FWD);
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN |
+ ALE_SECURE, slave->port_vlan);
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_DROP_UNKNOWN_VLAN, 1);
+ /* learning makes no sense in dual_mac mode */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_NOLEARN, 1);
+}
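+
+/* In dual EMAC mode each slave lives in its own reserved port VLAN
+ * (slave port + host only), which is what keeps the traffic of the two
+ * EMACs separated inside the shared switch fabric.
+ */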
+
+static void cpsw_port_add_switch_def_ale_entries(struct cpsw_priv *priv,
+ struct cpsw_slave *slave)
+{
+ u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 reg;
+
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_DROP_UNKNOWN_VLAN, 0);
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_NOLEARN, 0);
+ /* Disabling SA_UPDATE is required to make STP work; without it,
+ * host MAC addresses will jump between ports.
+ * As per the TRM, a MAC address can be defined as unicast
+ * supervisory (super) by setting both (ALE_BLOCKED | ALE_SECURE),
+ * which should prevent SA_UPDATE, but the HW seems to work
+ * incorrectly: setting ALE_SECURE causes STP packets to be
+ * dropped due to the ingress filter
+ * if (source address found) and (secure) and
+ * (receive port number != port_number)
+ * then discard the packet
+ */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_NO_SA_UPDATE, 1);
+
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan,
+ ALE_MCAST_FWD_2);
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, slave->port_vlan);
+
+ reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ CPSW2_PORT_VLAN;
+ slave_write(slave, slave->port_vlan, reg);
+}
+
+static void cpsw_adjust_link(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ struct phy_device *phy;
+ u32 mac_control = 0;
+
+ slave = &cpsw->slaves[priv->emac_port - 1];
+ phy = slave->phy;
+
+ if (!phy)
+ return;
+
+ if (phy->link) {
+ mac_control = CPSW_SL_CTL_GMII_EN;
+
+ if (phy->speed == 1000)
+ mac_control |= CPSW_SL_CTL_GIG;
+ if (phy->duplex)
+ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
+
+ /* set the speed_in input when RMII mode is used at 100Mbps */
+ if (phy->speed == 100)
+ mac_control |= CPSW_SL_CTL_IFCTL_A;
+ /* in-band mode only works at 10Mbps in RGMII mode */
+ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
+ mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
+
+ if (priv->rx_pause)
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+
+ if (priv->tx_pause)
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+ if (mac_control != slave->mac_control)
+ cpsw_sl_ctl_set(slave->mac_sl, mac_control);
+
+ /* enable forwarding */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ netif_tx_wake_all_queues(ndev);
+
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed &&
+ !cpsw_shp_is_off(priv))
+ dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are changed!");
+ } else {
+ netif_tx_stop_all_queues(ndev);
+
+ mac_control = 0;
+ /* disable forwarding */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_wait_for_idle(slave->mac_sl, 100);
+
+ cpsw_sl_ctl_reset(slave->mac_sl);
+ }
+
+ if (mac_control != slave->mac_control)
+ phy_print_status(phy);
+
+ slave->mac_control = mac_control;
+
+ if (phy->link && cpsw_need_resplit(cpsw))
+ cpsw_split_res(cpsw);
+}
+
+static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct phy_device *phy;
+
+ cpsw_sl_reset(slave->mac_sl, 100);
+ cpsw_sl_ctl_reset(slave->mac_sl);
+
+ /* setup priority mapping */
+ cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
+ RX_PRIORITY_MAPPING);
+
+ switch (cpsw->version) {
+ case CPSW_VERSION_1:
+ slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+ /* Increase RX FIFO size to 5 to support full-duplex
+ * flow control mode
+ */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
+ break;
+ case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ case CPSW_VERSION_4:
+ slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+ /* Increase RX FIFO size to 5 to support full-duplex
+ * flow control mode
+ */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
+ break;
+ }
+
+ /* setup max packet size and mac address */
+ cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
+ cpsw->rx_packet_max);
+ cpsw_set_slave_mac(slave, priv);
+
+ slave->mac_control = 0; /* no link yet */
+
+ if (cpsw_is_switch_en(cpsw))
+ cpsw_port_add_switch_def_ale_entries(priv, slave);
+ else
+ cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
+
+ if (!slave->data->phy_node)
+ dev_err(priv->dev, "no phy found on slave %d\n",
+ slave->slave_num);
+ phy = of_phy_connect(priv->ndev, slave->data->phy_node,
+ &cpsw_adjust_link, 0, slave->data->phy_if);
+ if (!phy) {
+ dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
+ slave->data->phy_node,
+ slave->slave_num);
+ return;
+ }
+ slave->phy = phy;
+
+ phy_attached_info(slave->phy);
+
+ phy_start(slave->phy);
+
+ /* Configure GMII_SEL register */
+ phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
+ slave->data->phy_if);
+}
+
+static int cpsw_ndo_stop(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+
+ cpsw_info(priv, ifdown, "shutting down ndev\n");
+ slave = &cpsw->slaves[priv->emac_port - 1];
+ if (slave->phy)
+ phy_stop(slave->phy);
+
+ netif_tx_stop_all_queues(priv->ndev);
+
+ if (slave->phy) {
+ phy_disconnect(slave->phy);
+ slave->phy = NULL;
+ }
+
+ __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
+
+ if (cpsw->usage_count <= 1) {
+ napi_disable(&cpsw->napi_rx);
+ napi_disable(&cpsw->napi_tx);
+ cpts_unregister(cpsw->cpts);
+ cpsw_intr_disable(cpsw);
+ cpdma_ctlr_stop(cpsw->dma);
+ cpsw_ale_stop(cpsw->ale);
+ cpsw_destroy_xdp_rxqs(cpsw);
+ }
+
+ if (cpsw_need_resplit(cpsw))
+ cpsw_split_res(cpsw);
+
+ cpsw->usage_count--;
+ pm_runtime_put_sync(cpsw->dev);
+ return 0;
+}
+
+static int cpsw_ndo_open(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+
+ dev_info(priv->dev, "starting ndev. mode: %s\n",
+ cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ /* Notify the stack of the actual queue counts. */
+ ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of tx queues\n");
+ goto pm_cleanup;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of rx queues\n");
+ goto pm_cleanup;
+ }
+
+ /* Initialize host and slave ports */
+ if (!cpsw->usage_count)
+ cpsw_init_host_port(priv);
+ cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv);
+
+ /* initialize shared resources for every ndev */
+ if (!cpsw->usage_count) {
+ /* create RX queues for both interfaces in dual_mac mode, as they
+ * use the same pool and must be destroyed together when there are
+ * no more users.
+ */
+ ret = cpsw_create_xdp_rxqs(cpsw);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = cpsw_fill_rx_channels(priv);
+ if (ret < 0)
+ goto err_cleanup;
+
+ if (cpts_register(cpsw->cpts))
+ dev_err(priv->dev, "error registering cpts device\n");
+
+ napi_enable(&cpsw->napi_rx);
+ napi_enable(&cpsw->napi_tx);
+
+ if (cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
+ }
+
+ if (cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
+ }
+ }
+
+ cpsw_restore(priv);
+
+ /* Enable Interrupt pacing if configured */
+ if (cpsw->coal_intvl != 0) {
+ struct ethtool_coalesce coal;
+
+ coal.rx_coalesce_usecs = cpsw->coal_intvl;
+ cpsw_set_coalesce(ndev, &coal);
+ }
+
+ cpdma_ctlr_start(cpsw->dma);
+ cpsw_intr_enable(cpsw);
+ cpsw->usage_count++;
+
+ return 0;
+
+err_cleanup:
+ cpsw_ndo_stop(ndev);
+
+pm_cleanup:
+ pm_runtime_put_sync(cpsw->dev);
+ return ret;
+}
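
The open path above relies on cpsw->usage_count to decide when the
resources shared by both ports must be brought up; cpsw_ndo_stop()
mirrors it for teardown. A small standalone sketch of the refcount
pattern (serialization, provided by RTNL in the driver, is assumed
away here):

#include <stdio.h>

static int usage_count;

static void port_open(void)
{
	if (!usage_count)
		puts("first open: init host port, rxqs, napi, irqs");
	usage_count++;
}

static void port_close(void)
{
	if (usage_count <= 1)
		puts("last close: napi off, stop dma/ale, free rxqs");
	usage_count--;
}

int main(void)
{
	port_open();  /* port 1: shared init runs */
	port_open();  /* port 2: shared init skipped */
	port_close(); /* port 2: shared teardown skipped */
	port_close(); /* port 1: shared teardown runs */
	return 0;
}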
+
+static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpts *cpts = cpsw->cpts;
+ struct netdev_queue *txq;
+ struct cpdma_chan *txch;
+ int ret, q_idx;
+
+ if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+ cpsw_err(priv, tx_err, "packet pad failed\n");
+ ndev->stats.tx_dropped++;
+ return NET_XMIT_DROP;
+ }
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ q_idx = skb_get_queue_mapping(skb);
+ if (q_idx >= cpsw->tx_ch_num)
+ q_idx = q_idx % cpsw->tx_ch_num;
+
+ txch = cpsw->txv[q_idx].ch;
+ txq = netdev_get_tx_queue(ndev, q_idx);
+ skb_tx_timestamp(skb);
+ ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
+ priv->emac_port);
+ if (unlikely(ret != 0)) {
+ cpsw_err(priv, tx_err, "desc submit failed\n");
+ goto fail;
+ }
+
+ /* If there are no more free TX descriptors, tell the kernel to
+ * stop sending us TX frames.
+ */
+ if (unlikely(!cpdma_check_free_tx_desc(txch))) {
+ netif_tx_stop_queue(txq);
+
+ /* Barrier, so that stop_queue is visible to other CPUs */
+ smp_mb__after_atomic();
+
+ if (cpdma_check_free_tx_desc(txch))
+ netif_tx_wake_queue(txq);
+ }
+
+ return NETDEV_TX_OK;
+fail:
+ ndev->stats.tx_dropped++;
+ netif_tx_stop_queue(txq);
+
+ /* Barrier, so that stop_queue is visible to other CPUs */
+ smp_mb__after_atomic();
+
+ if (cpdma_check_free_tx_desc(txch))
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+}
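
The stop/re-check/wake sequence above, paired with the wake in the TX
completion handler, avoids a lost wakeup when a descriptor frees up
between the emptiness check and the queue stop. A rough userspace
model of the pairing using C11 atomics; all names are hypothetical:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool queue_stopped;
static atomic_int  free_descs;

static void xmit_path(void)
{
	if (atomic_load(&free_descs) == 0) {
		atomic_store(&queue_stopped, true);
		/* full barrier, pairs with the completion path: makes the
		 * stop visible before we re-check for free descriptors
		 */
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load(&free_descs) > 0)
			atomic_store(&queue_stopped, false); /* wake */
	}
}

static void completion_path(void)
{
	atomic_fetch_add(&free_descs, 1); /* descriptor freed */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&queue_stopped))
		atomic_store(&queue_stopped, false); /* wake queue */
}

int main(void)
{
	completion_path(); /* a descriptor completes */
	xmit_path();       /* sees the free descriptor, stays awake */
	return 0;
}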
+
+static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct sockaddr *addr = (struct sockaddr *)p;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret, slave_no;
+ int flags = 0;
+ u16 vid = 0;
+
+ slave_no = cpsw_slave_index(cpsw, priv);
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ vid = cpsw->slaves[slave_no].port_vlan;
+ flags = ALE_VLAN | ALE_SECURE;
+
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
+ flags, vid);
+ cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
+ flags, vid);
+
+ ether_addr_copy(priv->mac_addr, addr->sa_data);
+ ether_addr_copy(ndev->dev_addr, priv->mac_addr);
+ cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv);
+
+ pm_runtime_put(cpsw->dev);
+
+ return 0;
+}
+
+static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+ int i;
+
+ if (cpsw_is_switch_en(cpsw)) {
+ dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n");
+ return 0;
+ }
+
+ if (vid == cpsw->data.default_vlan)
+ return 0;
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (cpsw->slaves[i].ndev &&
+ vid == cpsw->slaves[i].port_vlan)
+ goto err;
+ }
+
+ dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+ cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+ cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
+err:
+ pm_runtime_put(cpsw->dev);
+ return ret;
+}
+
+static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name,
+ size_t len)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = snprintf(name, len, "p%d", priv->emac_port);
+
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
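
The check above works because snprintf() returns the length the fully
formatted string would have needed, so a return value >= len signals
truncation. A tiny standalone demo:

#include <stdio.h>

int main(void)
{
	char name[3];
	int ret = snprintf(name, sizeof(name), "p%d", 100); /* needs 5 */

	if (ret >= (int)sizeof(name))
		puts("truncated -> would return -EINVAL");
	return 0;
}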
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cpsw_ndo_poll_controller(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ cpsw_intr_disable(cpsw);
+ cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
+ cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
+ cpsw_intr_enable(cpsw);
+}
+#endif
+
+static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct xdp_frame *xdpf;
+ int i, drops = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ xdpf = frames[i];
+ if (xdpf->len < CPSW_MIN_PACKET_SIZE) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ continue;
+ }
+
+ if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
+ drops++;
+ }
+
+ return n - drops;
+}
+
+static int cpsw_get_port_parent_id(struct net_device *ndev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ ppid->id_len = sizeof(cpsw->base_mac);
+ memcpy(&ppid->id, &cpsw->base_mac, ppid->id_len);
+
+ return 0;
+}
+
+static const struct net_device_ops cpsw_netdev_ops = {
+ .ndo_open = cpsw_ndo_open,
+ .ndo_stop = cpsw_ndo_stop,
+ .ndo_start_xmit = cpsw_ndo_start_xmit,
+ .ndo_set_mac_address = cpsw_ndo_set_mac_address,
+ .ndo_do_ioctl = cpsw_ndo_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = cpsw_ndo_tx_timeout,
+ .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
+ .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cpsw_ndo_poll_controller,
+#endif
+ .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
+ .ndo_setup_tc = cpsw_ndo_setup_tc,
+ .ndo_get_phys_port_name = cpsw_ndo_get_phys_port_name,
+ .ndo_bpf = cpsw_ndo_bpf,
+ .ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
+ .ndo_get_port_parent_id = cpsw_get_port_parent_id,
+};
+
+static void cpsw_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct platform_device *pdev;
+
+ pdev = to_platform_device(cpsw->dev);
+ strlcpy(info->driver, "cpsw-switch", sizeof(info->driver));
+ strlcpy(info->version, "2.0", sizeof(info->version));
+ strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+}
+
+static int cpsw_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no;
+
+ slave_no = cpsw_slave_index(cpsw, priv);
+ if (!cpsw->slaves[slave_no].phy)
+ return -EINVAL;
+
+ if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause))
+ return -EINVAL;
+
+ priv->rx_pause = pause->rx_pause ? true : false;
+ priv->tx_pause = pause->tx_pause ? true : false;
+
+ phy_set_asym_pause(cpsw->slaves[slave_no].phy,
+ priv->rx_pause, priv->tx_pause);
+
+ return 0;
+}
+
+static int cpsw_set_channels(struct net_device *ndev,
+ struct ethtool_channels *chs)
+{
+ return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
+}
+
+static const struct ethtool_ops cpsw_ethtool_ops = {
+ .get_drvinfo = cpsw_get_drvinfo,
+ .get_msglevel = cpsw_get_msglevel,
+ .set_msglevel = cpsw_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = cpsw_get_ts_info,
+ .get_coalesce = cpsw_get_coalesce,
+ .set_coalesce = cpsw_set_coalesce,
+ .get_sset_count = cpsw_get_sset_count,
+ .get_strings = cpsw_get_strings,
+ .get_ethtool_stats = cpsw_get_ethtool_stats,
+ .get_pauseparam = cpsw_get_pauseparam,
+ .set_pauseparam = cpsw_set_pauseparam,
+ .get_wol = cpsw_get_wol,
+ .set_wol = cpsw_set_wol,
+ .get_regs_len = cpsw_get_regs_len,
+ .get_regs = cpsw_get_regs,
+ .begin = cpsw_ethtool_op_begin,
+ .complete = cpsw_ethtool_op_complete,
+ .get_channels = cpsw_get_channels,
+ .set_channels = cpsw_set_channels,
+ .get_link_ksettings = cpsw_get_link_ksettings,
+ .set_link_ksettings = cpsw_set_link_ksettings,
+ .get_eee = cpsw_get_eee,
+ .set_eee = cpsw_set_eee,
+ .nway_reset = cpsw_nway_reset,
+ .get_ringparam = cpsw_get_ringparam,
+ .set_ringparam = cpsw_set_ringparam,
+};
+
+static int cpsw_probe_dt(struct cpsw_common *cpsw)
+{
+ struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np;
+ struct cpsw_platform_data *data = &cpsw->data;
+ struct device *dev = cpsw->dev;
+ int ret;
+ u32 prop;
+
+ if (!node)
+ return -EINVAL;
+
+ tmp_node = of_get_child_by_name(node, "ethernet-ports");
+ if (!tmp_node)
+ return -ENOENT;
+ data->slaves = of_get_child_count(tmp_node);
+ if (data->slaves != CPSW_SLAVE_PORTS_NUM) {
+ of_node_put(tmp_node);
+ return -ENOENT;
+ }
+
+ data->active_slave = 0;
+ data->channels = CPSW_MAX_QUEUES;
+ data->ale_entries = CPSW_ALE_NUM_ENTRIES;
+ data->dual_emac = 1;
+ data->bd_ram_size = CPSW_BD_RAM_SIZE;
+ data->mac_control = 0;
+
+ data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
+ sizeof(struct cpsw_slave_data),
+ GFP_KERNEL);
+ if (!data->slave_data)
+ return -ENOMEM;
+
+ /* Populate all the child nodes here...
+ */
+ ret = devm_of_platform_populate(dev);
+ /* We do not want to force this, as in some cases there may be no child nodes */
+ if (ret)
+ dev_warn(dev, "Doesn't have any child node\n");
+
+ for_each_child_of_node(tmp_node, port_np) {
+ struct cpsw_slave_data *slave_data;
+ const void *mac_addr;
+ u32 port_id;
+
+ ret = of_property_read_u32(port_np, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "%pOF error reading port_id %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+
+ if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) {
+ dev_err(dev, "%pOF has invalid port_id %u\n",
+ port_np, port_id);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ slave_data = &data->slave_data[port_id - 1];
+
+ slave_data->disabled = !of_device_is_available(port_np);
+ if (slave_data->disabled)
+ continue;
+
+ slave_data->slave_node = port_np;
+ slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL);
+ if (IS_ERR(slave_data->ifphy)) {
+ ret = PTR_ERR(slave_data->ifphy);
+ dev_err(dev, "%pOF: Error retrieving port phy: %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+
+ if (of_phy_is_fixed_link(port_np)) {
+ ret = of_phy_register_fixed_link(port_np);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+ slave_data->phy_node = of_node_get(port_np);
+ } else {
+ slave_data->phy_node =
+ of_parse_phandle(port_np, "phy-handle", 0);
+ }
+
+ if (!slave_data->phy_node) {
+ dev_err(dev, "%pOF no phy found\n", port_np);
+ ret = -ENODEV;
+ goto err_node_put;
+ }
+
+ ret = of_get_phy_mode(port_np, &slave_data->phy_if);
+ if (ret) {
+ dev_err(dev, "%pOF read phy-mode err %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+
+ mac_addr = of_get_mac_address(port_np);
+ if (!IS_ERR(mac_addr)) {
+ ether_addr_copy(slave_data->mac_addr, mac_addr);
+ } else {
+ ret = ti_cm_get_macid(dev, port_id - 1,
+ slave_data->mac_addr);
+ if (ret)
+ goto err_node_put;
+ }
+
+ if (of_property_read_u32(port_np, "ti,dual-emac-pvid",
+ &prop)) {
+ dev_err(dev, "%pOF Missing dual_emac_res_vlan in DT.\n",
+ port_np);
+ slave_data->dual_emac_res_vlan = port_id;
+ dev_err(dev, "%pOF Using %d as Reserved VLAN\n",
+ port_np, slave_data->dual_emac_res_vlan);
+ } else {
+ slave_data->dual_emac_res_vlan = prop;
+ }
+ }
+
+ of_node_put(tmp_node);
+ return 0;
+
+err_node_put:
+ of_node_put(port_np);
+ return ret;
+}
+
+static void cpsw_remove_dt(struct cpsw_common *cpsw)
+{
+ struct cpsw_platform_data *data = &cpsw->data;
+ int i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave_data *slave_data = &data->slave_data[i];
+ struct device_node *port_np = slave_data->phy_node;
+
+ if (port_np) {
+ if (of_phy_is_fixed_link(port_np))
+ of_phy_deregister_fixed_link(port_np);
+
+ of_node_put(port_np);
+ }
+ }
+}
+
+static int cpsw_create_ports(struct cpsw_common *cpsw)
+{
+ struct cpsw_platform_data *data = &cpsw->data;
+ struct net_device *ndev, *napi_ndev = NULL;
+ struct device *dev = cpsw->dev;
+ struct cpsw_priv *priv;
+ int ret = 0, i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave_data *slave_data = &data->slave_data[i];
+
+ if (slave_data->disabled)
+ continue;
+
+ ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
+ CPSW_MAX_QUEUES,
+ CPSW_MAX_QUEUES);
+ if (!ndev) {
+ dev_err(dev, "error allocating net_device\n");
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->cpsw = cpsw;
+ priv->ndev = ndev;
+ priv->dev = dev;
+ priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+ priv->emac_port = i + 1;
+
+ if (is_valid_ether_addr(slave_data->mac_addr)) {
+ ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
+ dev_info(cpsw->dev, "Detected MACID = %pM\n",
+ priv->mac_addr);
+ } else {
+ eth_random_addr(slave_data->mac_addr);
+ dev_info(cpsw->dev, "Random MACID = %pM\n",
+ priv->mac_addr);
+ }
+ ether_addr_copy(ndev->dev_addr, slave_data->mac_addr);
+ ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
+
+ cpsw->slaves[i].ndev = ndev;
+
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ ndev->ethtool_ops = &cpsw_ethtool_ops;
+ SET_NETDEV_DEV(ndev, dev);
+
+ if (!napi_ndev) {
+ /* The CPSW host port CPDMA interface is shared between
+ * ports, and there is only one TX and one RX IRQ
+ * available for all possible TX and RX channels
+ * respectively.
+ */
+ netif_napi_add(ndev, &cpsw->napi_rx,
+ cpsw->quirk_irq ?
+ cpsw_rx_poll : cpsw_rx_mq_poll,
+ CPSW_POLL_WEIGHT);
+ netif_tx_napi_add(ndev, &cpsw->napi_tx,
+ cpsw->quirk_irq ?
+ cpsw_tx_poll : cpsw_tx_mq_poll,
+ CPSW_POLL_WEIGHT);
+ }
+
+ napi_ndev = ndev;
+ }
+
+ return ret;
+}
+
+static void cpsw_unregister_ports(struct cpsw_common *cpsw)
+{
+ int i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (!cpsw->slaves[i].ndev)
+ continue;
+
+ unregister_netdev(cpsw->slaves[i].ndev);
+ }
+}
+
+static int cpsw_register_ports(struct cpsw_common *cpsw)
+{
+ int ret = 0, i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (!cpsw->slaves[i].ndev)
+ continue;
+
+ /* register the network device */
+ ret = register_netdev(cpsw->slaves[i].ndev);
+ if (ret) {
+ dev_err(cpsw->dev,
+ "cpsw: err registering net device%d\n", i);
+ cpsw->slaves[i].ndev = NULL;
+ break;
+ }
+ }
+
+ if (ret)
+ cpsw_unregister_ports(cpsw);
+ return ret;
+}
+
+bool cpsw_port_dev_check(const struct net_device *ndev)
+{
+ if (ndev->netdev_ops == &cpsw_netdev_ops) {
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ return !cpsw->data.dual_emac;
+ }
+
+ return false;
+}
+
+static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw)
+{
+ int set_val = 0;
+ int i;
+
+ if (!cpsw->ale_bypass &&
+ (cpsw->br_members == (ALE_PORT_1 | ALE_PORT_2)))
+ set_val = 1;
+
+ dev_dbg(cpsw->dev, "set offload_fwd_mark %d\n", set_val);
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct net_device *sl_ndev = cpsw->slaves[i].ndev;
+ struct cpsw_priv *priv = netdev_priv(sl_ndev);
+
+ priv->offload_fwd_mark = set_val;
+ }
+}
+
+static int cpsw_netdevice_port_link(struct net_device *ndev,
+ struct net_device *br_ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ if (!cpsw->br_members) {
+ cpsw->hw_bridge_dev = br_ndev;
+ } else {
+ /* Adding the port to a second bridge is not
+ * supported.
+ */
+ if (cpsw->hw_bridge_dev != br_ndev)
+ return -EOPNOTSUPP;
+ }
+
+ cpsw->br_members |= BIT(priv->emac_port);
+
+ cpsw_port_offload_fwd_mark_update(cpsw);
+
+ return NOTIFY_DONE;
+}
+
+static void cpsw_netdevice_port_unlink(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ cpsw->br_members &= ~BIT(priv->emac_port);
+
+ cpsw_port_offload_fwd_mark_update(cpsw);
+
+ if (!cpsw->br_members)
+ cpsw->hw_bridge_dev = NULL;
+}
+
+/* netdev notifier */
+static int cpsw_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ int ret = NOTIFY_DONE;
+
+ if (!cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = cpsw_netdevice_port_link(ndev,
+ info->upper_dev);
+ else
+ cpsw_netdevice_port_unlink(ndev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block cpsw_netdevice_nb __read_mostly = {
+ .notifier_call = cpsw_netdevice_event,
+};
+
+static int cpsw_register_notifiers(struct cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ ret = register_netdevice_notifier(&cpsw_netdevice_nb);
+ if (ret) {
+ dev_err(cpsw->dev, "can't register netdevice notifier\n");
+ return ret;
+ }
+
+ ret = cpsw_switchdev_register_notifiers(cpsw);
+ if (ret)
+ unregister_netdevice_notifier(&cpsw_netdevice_nb);
+
+ return ret;
+}
+
+static void cpsw_unregister_notifiers(struct cpsw_common *cpsw)
+{
+ cpsw_switchdev_unregister_notifiers(cpsw);
+ unregister_netdevice_notifier(&cpsw_netdevice_nb);
+}
+
+static const struct devlink_ops cpsw_devlink_ops = {
+};
+
+static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ if (id != CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = !cpsw->data.dual_emac;
+
+ return 0;
+}
+
+static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+ int vlan = cpsw->data.default_vlan;
+ bool switch_en = ctx->val.vbool;
+ bool if_running = false;
+ int i;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ if (id != CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ if (switch_en == !cpsw->data.dual_emac)
+ return 0;
+
+ if (!switch_en && cpsw->br_members) {
+ dev_err(cpsw->dev, "Remove ports from BR before disabling switch mode\n");
+ return -EINVAL;
+ }
+
+ rtnl_lock();
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+
+ if (!sl_ndev || !netif_running(sl_ndev))
+ continue;
+
+ if_running = true;
+ }
+
+ if (!if_running) {
+ /* all ndevs are down */
+ cpsw->data.dual_emac = !switch_en;
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+ struct cpsw_priv *priv;
+
+ if (!sl_ndev)
+ continue;
+
+ priv = netdev_priv(sl_ndev);
+ if (switch_en)
+ vlan = cpsw->data.default_vlan;
+ else
+ vlan = slave->data->dual_emac_res_vlan;
+ slave->port_vlan = vlan;
+ }
+ goto exit;
+ }
+
+ if (switch_en) {
+ dev_info(cpsw->dev, "Enable switch mode\n");
+
+ /* enable bypass - no forwarding; all traffic goes to Host */
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
+
+ /* clean up ALE table */
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
+ cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
+
+ cpsw_init_host_port_switch(cpsw);
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+ struct cpsw_priv *priv;
+
+ if (!sl_ndev)
+ continue;
+
+ priv = netdev_priv(sl_ndev);
+ slave->port_vlan = vlan;
+ if (netif_running(sl_ndev))
+ cpsw_port_add_switch_def_ale_entries(priv,
+ slave);
+ }
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
+ cpsw->data.dual_emac = false;
+ } else {
+ dev_info(cpsw->dev, "Disable switch mode\n");
+
+ /* enable bypass - no forwarding; all traffic goes to Host */
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
+ cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
+
+ cpsw_init_host_port_dual_mac(cpsw);
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+ struct cpsw_priv *priv;
+
+ if (!sl_ndev)
+ continue;
+
+ priv = netdev_priv(slave->ndev);
+ slave->port_vlan = slave->data->dual_emac_res_vlan;
+ cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
+ }
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
+ cpsw->data.dual_emac = true;
+ }
+exit:
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ switch (id) {
+ case CPSW_DL_PARAM_ALE_BYPASS:
+ ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+ int ret = -EOPNOTSUPP;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ switch (id) {
+ case CPSW_DL_PARAM_ALE_BYPASS:
+ ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS,
+ ctx->val.vbool);
+ if (!ret) {
+ cpsw->ale_bypass = ctx->val.vbool;
+ cpsw_port_offload_fwd_mark_update(cpsw);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param cpsw_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_SWITCH_MODE,
+ "switch_mode", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ cpsw_dl_switch_mode_get, cpsw_dl_switch_mode_set,
+ NULL),
+ DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS,
+ "ale_bypass", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL),
+};
+
+static int cpsw_register_devlink(struct cpsw_common *cpsw)
+{
+ struct device *dev = cpsw->dev;
+ struct cpsw_devlink *dl_priv;
+ int ret = 0;
+
+ cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv));
+ if (!cpsw->devlink)
+ return -ENOMEM;
+
+ dl_priv = devlink_priv(cpsw->devlink);
+ dl_priv->cpsw = cpsw;
+
+ ret = devlink_register(cpsw->devlink, dev);
+ if (ret) {
+ dev_err(dev, "DL reg fail ret:%d\n", ret);
+ goto dl_free;
+ }
+
+ ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params,
+ ARRAY_SIZE(cpsw_devlink_params));
+ if (ret) {
+ dev_err(dev, "DL params reg fail ret:%d\n", ret);
+ goto dl_unreg;
+ }
+
+ devlink_params_publish(cpsw->devlink);
+ return ret;
+
+dl_unreg:
+ devlink_unregister(cpsw->devlink);
+dl_free:
+ devlink_free(cpsw->devlink);
+ return ret;
+}
+
+static void cpsw_unregister_devlink(struct cpsw_common *cpsw)
+{
+ devlink_params_unpublish(cpsw->devlink);
+ devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
+ ARRAY_SIZE(cpsw_devlink_params));
+ devlink_unregister(cpsw->devlink);
+ devlink_free(cpsw->devlink);
+}
+
+static const struct of_device_id cpsw_of_mtable[] = {
+ { .compatible = "ti,cpsw-switch"},
+ { .compatible = "ti,am335x-cpsw-switch"},
+ { .compatible = "ti,am4372-cpsw-switch"},
+ { .compatible = "ti,dra7-cpsw-switch"},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
+
+static const struct soc_device_attribute cpsw_soc_devices[] = {
+ { .family = "AM33xx", .revision = "ES1.0"},
+ { /* sentinel */ }
+};
+
+static int cpsw_probe(struct platform_device *pdev)
+{
+ const struct soc_device_attribute *soc;
+ struct device *dev = &pdev->dev;
+ struct cpsw_common *cpsw;
+ struct resource *ss_res;
+ struct gpio_descs *mode;
+ void __iomem *ss_regs;
+ int ret = 0, ch;
+ struct clk *clk;
+ int irq;
+
+ cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
+ if (!cpsw)
+ return -ENOMEM;
+
+ cpsw_slave_index = cpsw_slave_index_priv;
+
+ cpsw->dev = dev;
+
+ cpsw->slaves = devm_kcalloc(dev,
+ CPSW_SLAVE_PORTS_NUM,
+ sizeof(struct cpsw_slave),
+ GFP_KERNEL);
+ if (!cpsw->slaves)
+ return -ENOMEM;
+
+ mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
+ if (IS_ERR(mode)) {
+ ret = PTR_ERR(mode);
+ dev_err(dev, "gpio request failed, ret %d\n", ret);
+ return ret;
+ }
+
+ clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "fck is not found %d\n", ret);
+ return ret;
+ }
+ cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
+
+ ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ss_regs = devm_ioremap_resource(dev, ss_res);
+ if (IS_ERR(ss_regs)) {
+ ret = PTR_ERR(ss_regs);
+ return ret;
+ }
+ cpsw->regs = ss_regs;
+
+ irq = platform_get_irq_byname(pdev, "rx");
+ if (irq < 0)
+ return irq;
+ cpsw->irqs_table[0] = irq;
+
+ irq = platform_get_irq_byname(pdev, "tx");
+ if (irq < 0)
+ return irq;
+ cpsw->irqs_table[1] = irq;
+
+ platform_set_drvdata(pdev, cpsw);
+ /* This may be required here for child devices. */
+ pm_runtime_enable(dev);
+
+ /* Need to enable clocks with the runtime PM API to access module
+ * registers
+ */
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ ret = cpsw_probe_dt(cpsw);
+ if (ret)
+ goto clean_dt_ret;
+
+ soc = soc_device_match(cpsw_soc_devices);
+ if (soc)
+ cpsw->quirk_irq = 1;
+
+ cpsw->rx_packet_max = rx_packet_max;
+ cpsw->descs_pool_size = descs_pool_size;
+ eth_random_addr(cpsw->base_mac);
+
+ ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
+ (u32 __force)ss_res->start + CPSW2_BD_OFFSET,
+ descs_pool_size);
+ if (ret)
+ goto clean_dt_ret;
+
+ cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ?
+ ss_regs + CPSW1_WR_OFFSET :
+ ss_regs + CPSW2_WR_OFFSET;
+
+ ch = cpsw->quirk_irq ? 0 : 7;
+ cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
+ if (IS_ERR(cpsw->txv[0].ch)) {
+ dev_err(dev, "error initializing tx dma channel\n");
+ ret = PTR_ERR(cpsw->txv[0].ch);
+ goto clean_cpts;
+ }
+
+ cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
+ if (IS_ERR(cpsw->rxv[0].ch)) {
+ dev_err(dev, "error initializing rx dma channel\n");
+ ret = PTR_ERR(cpsw->rxv[0].ch);
+ goto clean_cpts;
+ }
+ cpsw_split_res(cpsw);
+
+ /* setup netdevs */
+ ret = cpsw_create_ports(cpsw);
+ if (ret)
+ goto clean_unregister_netdev;
+
+ /* Grab the RX and TX IRQs. Note that we also have RX_THRESHOLD and
+ * MISC IRQs, which are always kept disabled by this driver, so we
+ * will not request them.
+ *
+ * If anyone wants to implement support for those, make sure to
+ * first request them and append them to the irqs_table array.
+ */
+
+ ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
+ 0, dev_name(dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching irq (%d)\n", ret);
+ goto clean_unregister_netdev;
+ }
+
+ ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
+ 0, dev_name(dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching irq (%d)\n", ret);
+ goto clean_unregister_netdev;
+ }
+
+ ret = cpsw_register_notifiers(cpsw);
+ if (ret)
+ goto clean_unregister_netdev;
+
+ ret = cpsw_register_devlink(cpsw);
+ if (ret)
+ goto clean_unregister_notifiers;
+
+ ret = cpsw_register_ports(cpsw);
+ if (ret)
+ goto clean_unregister_notifiers;
+
+ dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n",
+ &ss_res->start, descs_pool_size,
+ cpsw->version, CPSW_MAJOR_VERSION(cpsw->version),
+ CPSW_MINOR_VERSION(cpsw->version),
+ CPSW_RTL_VERSION(cpsw->version));
+
+ pm_runtime_put(dev);
+
+ return 0;
+
+clean_unregister_notifiers:
+ cpsw_unregister_notifiers(cpsw);
+clean_unregister_netdev:
+ cpsw_unregister_ports(cpsw);
+clean_cpts:
+ cpts_release(cpsw->cpts);
+ cpdma_ctlr_destroy(cpsw->dma);
+clean_dt_ret:
+ cpsw_remove_dt(cpsw);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int cpsw_remove(struct platform_device *pdev)
+{
+ struct cpsw_common *cpsw = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return ret;
+ }
+
+ cpsw_unregister_notifiers(cpsw);
+ cpsw_unregister_devlink(cpsw);
+ cpsw_unregister_ports(cpsw);
+
+ cpts_release(cpsw->cpts);
+ cpdma_ctlr_destroy(cpsw->dma);
+ cpsw_remove_dt(cpsw);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver cpsw_driver = {
+ .driver = {
+ .name = "cpsw-switch",
+ .of_match_table = cpsw_of_mtable,
+ },
+ .probe = cpsw_probe,
+ .remove = cpsw_remove,
+};
+
+module_platform_driver(cpsw_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver");
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 476d050a022c..b833cc1d188c 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -5,20 +5,415 @@
* Copyright (C) 2019 Texas Instruments
*/
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
+#include <net/page_pool.h>
+#include <net/pkt_cls.h>
+#include "cpsw.h"
#include "cpts.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "davinci_cpdma.h"
+int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);
+
+void cpsw_intr_enable(struct cpsw_common *cpsw)
+{
+ writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
+ writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
+
+ cpdma_ctlr_int_ctrl(cpsw->dma, true);
+}
+
+void cpsw_intr_disable(struct cpsw_common *cpsw)
+{
+ writel_relaxed(0, &cpsw->wr_regs->tx_en);
+ writel_relaxed(0, &cpsw->wr_regs->rx_en);
+
+ cpdma_ctlr_int_ctrl(cpsw->dma, false);
+}
+
+void cpsw_tx_handler(void *token, int len, int status)
+{
+ struct cpsw_meta_xdp *xmeta;
+ struct xdp_frame *xdpf;
+ struct net_device *ndev;
+ struct netdev_queue *txq;
+ struct sk_buff *skb;
+ int ch;
+
+ if (cpsw_is_xdpf_handle(token)) {
+ xdpf = cpsw_handle_to_xdpf(token);
+ xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
+ ndev = xmeta->ndev;
+ ch = xmeta->ch;
+ xdp_return_frame(xdpf);
+ } else {
+ skb = token;
+ ndev = skb->dev;
+ ch = skb_get_queue_mapping(skb);
+ cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Check whether the queue is stopped due to stalled TX DMA; if it
+ * is, wake the queue, as we now have a free descriptor for TX.
+ */
+ txq = netdev_get_tx_queue(ndev, ch);
+ if (unlikely(netif_tx_queue_stopped(txq)))
+ netif_tx_wake_queue(txq);
+
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
+}
+
+irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
+{
+ struct cpsw_common *cpsw = dev_id;
+
+ writel(0, &cpsw->wr_regs->tx_en);
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
+
+ if (cpsw->quirk_irq) {
+ disable_irq_nosync(cpsw->irqs_table[1]);
+ cpsw->tx_irq_disabled = true;
+ }
+
+ napi_schedule(&cpsw->napi_tx);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
+{
+ struct cpsw_common *cpsw = dev_id;
+
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
+ writel(0, &cpsw->wr_regs->rx_en);
+
+ if (cpsw->quirk_irq) {
+ disable_irq_nosync(cpsw->irqs_table[0]);
+ cpsw->rx_irq_disabled = true;
+ }
+
+ napi_schedule(&cpsw->napi_rx);
+ return IRQ_HANDLED;
+}
+
+int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
+ int num_tx, cur_budget, ch;
+ u32 ch_map;
+ struct cpsw_vector *txv;
+
+ /* process every unprocessed channel */
+ ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
+ for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
+ if (!(ch_map & 0x80))
+ continue;
+
+ txv = &cpsw->txv[ch];
+ if (unlikely(txv->budget > budget - num_tx))
+ cur_budget = budget - num_tx;
+ else
+ cur_budget = txv->budget;
+
+ num_tx += cpdma_chan_process(txv->ch, cur_budget);
+ if (num_tx >= budget)
+ break;
+ }
+
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ writel(0xff, &cpsw->wr_regs->tx_en);
+ }
+
+ return num_tx;
+}
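
Note the mirrored bitmap walks: TX vectors are allocated from the
highest-priority CPDMA channel downward (probe creates txv[0] on
channel 7), so cpsw_tx_mq_poll() scans the channel-state byte
MSB-first, while cpsw_rx_mq_poll() below scans LSB-first. A
self-contained sketch of both walks, with illustrative state bytes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t tx_state = 0x84; /* pending channels, bit N = cpdma ch N */
	uint8_t rx_state = 0x05;
	uint32_t map;
	int ch;

	for (ch = 0, map = tx_state; map & 0xff; map <<= 1, ch++)
		if (map & 0x80)
			printf("service txv[%d] (cpdma ch %d)\n", ch, 7 - ch);

	for (ch = 0, map = rx_state; map; map >>= 1, ch++)
		if (map & 0x01)
			printf("service rxv[%d]\n", ch);
	return 0;
}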
+
+int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
+ int num_tx;
+
+ num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ writel(0xff, &cpsw->wr_regs->tx_en);
+ if (cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
+ }
+ }
+
+ return num_tx;
+}
+
+int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
+ int num_rx, cur_budget, ch;
+ u32 ch_map;
+ struct cpsw_vector *rxv;
+
+ /* process every unprocessed channel */
+ ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
+ for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
+ if (!(ch_map & 0x01))
+ continue;
+
+ rxv = &cpsw->rxv[ch];
+ if (unlikely(rxv->budget > budget - num_rx))
+ cur_budget = budget - num_rx;
+ else
+ cur_budget = rxv->budget;
+
+ num_rx += cpdma_chan_process(rxv->ch, cur_budget);
+ if (num_rx >= budget)
+ break;
+ }
+
+ if (num_rx < budget) {
+ napi_complete_done(napi_rx, num_rx);
+ writel(0xff, &cpsw->wr_regs->rx_en);
+ }
+
+ return num_rx;
+}
+
+int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
+ int num_rx;
+
+ num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
+ if (num_rx < budget) {
+ napi_complete_done(napi_rx, num_rx);
+ writel(0xff, &cpsw->wr_regs->rx_en);
+ if (cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
+ }
+ }
+
+ return num_rx;
+}
+
+void cpsw_rx_vlan_encap(struct sk_buff *skb)
+{
+ struct cpsw_priv *priv = netdev_priv(skb->dev);
+ u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
+ struct cpsw_common *cpsw = priv->cpsw;
+ u16 vtag, vid, prio, pkt_type;
+
+ /* Remove VLAN header encapsulation word */
+ skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);
+
+ pkt_type = (rx_vlan_encap_hdr >>
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
+ /* Ignore unknown & Priority-tagged packets*/
+ if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
+ pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
+ return;
+
+ vid = (rx_vlan_encap_hdr >>
+ CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
+ VLAN_VID_MASK;
+ /* Ignore vid 0 and pass packet as is */
+ if (!vid)
+ return;
+
+ /* Untag P0 packets if set for vlan */
+ if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) {
+ prio = (rx_vlan_encap_hdr >>
+ CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
+ CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;
+
+ vtag = (prio << VLAN_PRIO_SHIFT) | vid;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
+ }
+
+ /* strip vlan tag for VLAN-tagged packet */
+ if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
+ memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+ skb_pull(skb, VLAN_HLEN);
+ }
+}
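
A self-contained sketch of the encap-word parsing above. The
shift/mask values are placeholders standing in for the
CPSW_RX_VLAN_ENCAP_* constants in cpsw.h (not shown in this hunk),
chosen only so the demo compiles and the fields do not overlap:

#include <stdint.h>
#include <stdio.h>

#define PKT_TYPE_SHIFT 16     /* placeholder layout */
#define PKT_TYPE_MSK   0x3
#define VID_SHIFT      0
#define VID_MSK        0xfff
#define PRIO_SHIFT     12
#define PRIO_MSK       0x7

int main(void)
{
	uint32_t hdr = (5u << PRIO_SHIFT) | (0u << PKT_TYPE_SHIFT) | 100u;
	uint16_t pkt_type = (hdr >> PKT_TYPE_SHIFT) & PKT_TYPE_MSK;
	uint16_t vid = (hdr >> VID_SHIFT) & VID_MSK;
	uint16_t prio = (hdr >> PRIO_SHIFT) & PRIO_MSK;
	uint16_t vtag = (prio << 13) | vid; /* VLAN_PRIO_SHIFT == 13 */

	printf("type=%u vid=%u prio=%u vtag=%#x\n", pkt_type, vid, prio, vtag);
	return 0;
}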
+
+void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
+ slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
+}
+
+void soft_reset(const char *module, void __iomem *reg)
+{
+ unsigned long timeout = jiffies + HZ;
+
+ writel_relaxed(1, reg);
+ do {
+ cpu_relax();
+ } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
+
+ WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
+}
+
+void cpsw_ndo_tx_timeout(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ch;
+
+ cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
+ ndev->stats.tx_errors++;
+ cpsw_intr_disable(cpsw);
+ for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+ cpdma_chan_stop(cpsw->txv[ch].ch);
+ cpdma_chan_start(cpsw->txv[ch].ch);
+ }
+
+ cpsw_intr_enable(cpsw);
+ netif_trans_update(ndev);
+ netif_tx_wake_all_queues(ndev);
+}
+
+static int cpsw_get_common_speed(struct cpsw_common *cpsw)
+{
+ int i, speed;
+
+ for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
+ speed += cpsw->slaves[i].phy->speed;
+
+ return speed;
+}
+
+int cpsw_need_resplit(struct cpsw_common *cpsw)
+{
+ int i, rlim_ch_num;
+ int speed, ch_rate;
+
+ /* re-split resources only if the link speed has changed */
+ speed = cpsw_get_common_speed(cpsw);
+ if (speed == cpsw->speed || !speed)
+ return 0;
+
+ cpsw->speed = speed;
+
+ for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
+ if (!ch_rate)
+ break;
+
+ rlim_ch_num++;
+ }
+
+ /* cases not dependent on speed */
+ if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
+ return 0;
+
+ return 1;
+}
+
+void cpsw_split_res(struct cpsw_common *cpsw)
+{
+ u32 consumed_rate = 0, bigest_rate = 0;
+ struct cpsw_vector *txv = cpsw->txv;
+ int i, ch_weight, rlim_ch_num = 0;
+ int budget, bigest_rate_ch = 0;
+ u32 ch_rate, max_rate;
+ int ch_budget = 0;
+
+ for (i = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(txv[i].ch);
+ if (!ch_rate)
+ continue;
+
+ rlim_ch_num++;
+ consumed_rate += ch_rate;
+ }
+
+ if (cpsw->tx_ch_num == rlim_ch_num) {
+ max_rate = consumed_rate;
+ } else if (!rlim_ch_num) {
+ ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
+ bigest_rate = 0;
+ max_rate = consumed_rate;
+ } else {
+ max_rate = cpsw->speed * 1000;
+
+ /* if max_rate is less than expected due to reduced link speed,
+ * split proportionally according to the next potential max speed
+ */
+ if (max_rate < consumed_rate)
+ max_rate *= 10;
+
+ if (max_rate < consumed_rate)
+ max_rate *= 10;
+
+ ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
+ ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
+ (cpsw->tx_ch_num - rlim_ch_num);
+ bigest_rate = (max_rate - consumed_rate) /
+ (cpsw->tx_ch_num - rlim_ch_num);
+ }
+
+ /* split tx weight/budget */
+ budget = CPSW_POLL_WEIGHT;
+ for (i = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(txv[i].ch);
+ if (ch_rate) {
+ txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
+ if (!txv[i].budget)
+ txv[i].budget++;
+ if (ch_rate > bigest_rate) {
+ bigest_rate_ch = i;
+ bigest_rate = ch_rate;
+ }
+
+ ch_weight = (ch_rate * 100) / max_rate;
+ if (!ch_weight)
+ ch_weight++;
+ cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
+ } else {
+ txv[i].budget = ch_budget;
+ if (!bigest_rate_ch)
+ bigest_rate_ch = i;
+ cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
+ }
+
+ budget -= txv[i].budget;
+ }
+
+ if (budget)
+ txv[bigest_rate_ch].budget += budget;
+
+ /* split rx budget */
+ budget = CPSW_POLL_WEIGHT;
+ ch_budget = budget / cpsw->rx_ch_num;
+ for (i = 0; i < cpsw->rx_ch_num; i++) {
+ cpsw->rxv[i].budget = ch_budget;
+ budget -= ch_budget;
+ }
+
+ if (budget)
+ cpsw->rxv[0].budget += budget;
+}
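
To make the split arithmetic concrete, here is a worked, standalone
re-run of the mixed case above, assuming CPSW_POLL_WEIGHT is 64 (its
value in cpsw_priv.h): a 1000Mbps link with three TX channels, ch0
limited to 100Mbps, ch1 to 200Mbps, ch2 unlimited. The rate-limited
channels get budgets proportional to their rates, and the unlimited
channel takes the remainder plus any leftover NAPI weight:

#include <stdio.h>

#define POLL_WEIGHT 64

int main(void)
{
	unsigned int rate[3] = { 100000, 200000, 0 }; /* kbps, 0 = no limit */
	unsigned int budget[3], consumed = 0, max_rate = 1000 * 1000;
	unsigned int big_rate;
	int i, rlim = 0, left = POLL_WEIGHT, free_budget, big_ch = 0;

	for (i = 0; i < 3; i++)
		if (rate[i]) {
			rlim++;
			consumed += rate[i];
		}

	/* budget reserved for rate-limited channels; rest split evenly */
	free_budget = (POLL_WEIGHT - consumed * POLL_WEIGHT / max_rate) /
		      (3 - rlim);
	big_rate = (max_rate - consumed) / (3 - rlim);

	for (i = 0; i < 3; i++) {
		if (rate[i]) {
			budget[i] = rate[i] * POLL_WEIGHT / max_rate;
			if (!budget[i])
				budget[i] = 1;
			if (rate[i] > big_rate) {
				big_ch = i;
				big_rate = rate[i];
			}
		} else {
			budget[i] = free_budget;
			if (!big_ch)
				big_ch = i;
		}
		left -= budget[i];
	}
	budget[big_ch] += left; /* leftover weight to the biggest channel */

	for (i = 0; i < 3; i++)
		printf("ch%d budget %u\n", i, budget[i]); /* 6, 12, 46 */
	return 0;
}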
+
int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
int ale_ageout, phys_addr_t desc_mem_phys,
int descs_pool_size)
@@ -28,6 +423,7 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
struct cpsw_platform_data *data;
struct cpdma_params dma_params;
struct device *dev = cpsw->dev;
+ struct device_node *cpts_node;
void __iomem *cpts_regs;
int ret = 0, i;
@@ -122,11 +518,859 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
return -ENOMEM;
}
- cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
+ cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts");
+ if (!cpts_node)
+ cpts_node = cpsw->dev->of_node;
+
+ cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node);
if (IS_ERR(cpsw->cpts)) {
ret = PTR_ERR(cpsw->cpts);
cpdma_ctlr_destroy(cpsw->dma);
}
+ of_node_put(cpts_node);
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+
+static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ u32 ts_en, seq_id;
+
+ if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
+ slave_write(slave, 0, CPSW1_TS_CTL);
+ return;
+ }
+
+ seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
+ ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
+
+ if (priv->tx_ts_enabled)
+ ts_en |= CPSW_V1_TS_TX_EN;
+
+ if (priv->rx_ts_enabled)
+ ts_en |= CPSW_V1_TS_RX_EN;
+
+ slave_write(slave, ts_en, CPSW1_TS_CTL);
+ slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
+}
+
+static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 ctrl, mtype;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+
+ ctrl = slave_read(slave, CPSW2_CONTROL);
+ switch (cpsw->version) {
+ case CPSW_VERSION_2:
+ ctrl &= ~CTRL_V2_ALL_TS_MASK;
+
+ if (priv->tx_ts_enabled)
+ ctrl |= CTRL_V2_TX_TS_BITS;
+
+ if (priv->rx_ts_enabled)
+ ctrl |= CTRL_V2_RX_TS_BITS;
+ break;
+ case CPSW_VERSION_3:
+ default:
+ ctrl &= ~CTRL_V3_ALL_TS_MASK;
+
+ if (priv->tx_ts_enabled)
+ ctrl |= CTRL_V3_TX_TS_BITS;
+
+ if (priv->rx_ts_enabled)
+ ctrl |= CTRL_V3_RX_TS_BITS;
+ break;
+ }
+
+ mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
+
+ slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
+ slave_write(slave, ctrl, CPSW2_CONTROL);
+ writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
+ writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
+}
+
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct hwtstamp_config cfg;
+
+ if (cpsw->version != CPSW_VERSION_1 &&
+ cpsw->version != CPSW_VERSION_2 &&
+ cpsw->version != CPSW_VERSION_3)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ return -ERANGE;
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->rx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -ERANGE;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
+
+ switch (cpsw->version) {
+ case CPSW_VERSION_1:
+ cpsw_hwtstamp_v1(priv);
+ break;
+ case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ cpsw_hwtstamp_v2(priv);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
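
For reference, this handler is reached through the standard
SIOCSHWTSTAMP ioctl. A minimal userspace sketch follows; "eth0" is a
placeholder interface name, and on return cfg.rx_filter reports the
filter the driver actually granted (a V2 L4 sync request is coerced
to HWTSTAMP_FILTER_PTP_V2_EVENT above):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg = { 0 };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("granted rx_filter %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}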
+
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(dev);
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config cfg;
+
+ if (cpsw->version != CPSW_VERSION_1 &&
+ cpsw->version != CPSW_VERSION_2 &&
+ cpsw->version != CPSW_VERSION_3)
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = priv->rx_ts_enabled;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+#else
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_TI_CPTS */
+
+int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return cpsw_hwtstamp_set(dev, req);
+ case SIOCGHWTSTAMP:
+ return cpsw_hwtstamp_get(dev, req);
+ }
+
+ if (!cpsw->slaves[slave_no].phy)
+ return -EOPNOTSUPP;
+ return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
+}
+
+int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 min_rate;
+ u32 ch_rate;
+ int i, ret;
+
+ ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
+ if (ch_rate == rate)
+ return 0;
+
+ ch_rate = rate * 1000;
+ min_rate = cpdma_chan_get_min_rate(cpsw->dma);
+ if ((ch_rate < min_rate && ch_rate)) {
+ dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
+ min_rate);
+ return -EINVAL;
+ }
+
+ if (rate > cpsw->speed) {
+ dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
+ pm_runtime_put(cpsw->dev);
+
+ if (ret)
+ return ret;
+
+ /* update rates for the slaves' TX queues */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ slave = &cpsw->slaves[i];
+ if (!slave->ndev)
+ continue;
+
+ netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
+ }
+
+ cpsw_split_res(cpsw);
+ return ret;
+}
+
+static int cpsw_tc_to_fifo(int tc, int num_tc)
+{
+ if (tc == num_tc - 1)
+ return 0;
+
+ return CPSW_FIFO_SHAPERS_NUM - tc;
+}
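
A tiny demo of the resulting mapping, assuming CPSW_FIFO_SHAPERS_NUM
is 3 (CPSW_TC_NUM - 1 in cpsw_priv.h): the last, lowest-priority tc
always lands on FIFO 0, which cannot be shaped, while higher-priority
tcs take the shapers from the top down:

#include <stdio.h>

#define FIFO_SHAPERS_NUM 3 /* assumed, as in cpsw_priv.h */

static int tc_to_fifo(int tc, int num_tc)
{
	return tc == num_tc - 1 ? 0 : FIFO_SHAPERS_NUM - tc;
}

int main(void)
{
	int tc, num_tc = 3;

	for (tc = 0; tc < num_tc; tc++)
		printf("tc%d -> fifo %d\n", tc, tc_to_fifo(tc, num_tc));
	/* tc0 -> fifo 3, tc1 -> fifo 2, tc2 -> fifo 0 */
	return 0;
}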
+
+bool cpsw_shp_is_off(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 shift, mask, val;
+
+ val = readl_relaxed(&cpsw->regs->ptype);
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+ mask = 7 << shift;
+ val = val & mask;
+
+ return !val;
+}
+
+static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 shift, mask, val;
+
+ val = readl_relaxed(&cpsw->regs->ptype);
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+ mask = (1 << --fifo) << shift;
+ val = on ? val | mask : val & ~mask;
+
+ writel_relaxed(val, &cpsw->regs->ptype);
+}
+
+static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 val = 0, send_pct, shift;
+ struct cpsw_slave *slave;
+ int pct = 0, i;
+
+ if (bw > priv->shp_cfg_speed * 1000)
+ goto err;
+
+ /* shaping has to stay enabled for the highest FIFOs linearly,
+ * and FIFO bandwidth can be no more than the interface allows
+ */
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ send_pct = slave_read(slave, SEND_PERCENT);
+ for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
+ if (!bw) {
+ if (i >= fifo || !priv->fifo_bw[i])
+ continue;
+
+ dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
+ continue;
+ }
+
+ if (!priv->fifo_bw[i] && i > fifo) {
+ dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
+ return -EINVAL;
+ }
+
+ shift = (i - 1) * 8;
+ if (i == fifo) {
+ send_pct &= ~(CPSW_PCT_MASK << shift);
+ val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
+ if (!val)
+ val = 1;
+
+ send_pct |= val << shift;
+ pct += val;
+ continue;
+ }
+
+ if (priv->fifo_bw[i])
+ pct += (send_pct >> shift) & CPSW_PCT_MASK;
+ }
+
+ if (pct >= 100)
+ goto err;
+
+ slave_write(slave, send_pct, SEND_PERCENT);
+ priv->fifo_bw[fifo] = bw;
+
+ dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
+ DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
+
+ return 0;
+err:
+ dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
+ return -EINVAL;
+}
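
The percent math above relies on bw being in kbit/s (the CBS
idleslope unit) and shp_cfg_speed in Mbit/s, so dividing by
speed * 10 yields a percentage of line rate, rounded up. A one-line
worked check under those assumed units:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int bw = 20000, speed = 1000; /* kbit/s, Mbit/s */

	printf("%d%%\n", DIV_ROUND_UP(bw, speed * 10)); /* prints 2% */
	return 0;
}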
+
+static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 tx_in_ctl_rg, val;
+ int ret;
+
+ ret = cpsw_set_fifo_bw(priv, fifo, bw);
+ if (ret)
+ return ret;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
+
+ if (!bw)
+ cpsw_fifo_shp_on(priv, fifo, bw);
+
+ val = slave_read(slave, tx_in_ctl_rg);
+ if (cpsw_shp_is_off(priv)) {
+ /* disable FIFOs rate limited queues */
+ val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
+
+ /* set type of FIFO queues to normal priority mode */
+ val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
+
+ /* set type of FIFO queues to be rate limited */
+ if (bw)
+ val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
+ else
+ priv->shp_cfg_speed = 0;
+ }
+
+ /* toggle a FIFO rate limited queue */
+ if (bw)
+ val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+ else
+ val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+ slave_write(slave, val, tx_in_ctl_rg);
+
+ /* FIFO transmit shape enable */
+ cpsw_fifo_shp_on(priv, fifo, bw);
+ return 0;
+}
+
+/* Defaults:
+ * class A - prio 3
+ * class B - prio 2
+ * shaping for class A should be set first
+ */
+static int cpsw_set_cbs(struct net_device *ndev,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int prev_speed = 0;
+ int tc, ret, fifo;
+ u32 bw = 0;
+
+ tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
+
+ /* enable channels in backward order, as the highest FIFOs must be
+ * rate limited first, and for compliance with CPDMA rate-limited
+ * channels, which are also used in backward order. FIFO0 cannot be
+ * rate limited.
+ */
+ fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
+ if (!fifo) {
+ dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
+ return -EINVAL;
+ }
+
+ /* do nothing, it's disabled anyway */
+ if (!qopt->enable && !priv->fifo_bw[fifo])
+ return 0;
+
+ /* shapers can be set if link speed is known */
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ if (slave->phy && slave->phy->link) {
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed)
+ prev_speed = priv->shp_cfg_speed;
+
+ priv->shp_cfg_speed = slave->phy->speed;
+ }
+
+ if (!priv->shp_cfg_speed) {
+ dev_err(priv->dev, "Link speed is not known");
+ return -1;
+ }
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ bw = qopt->enable ? qopt->idleslope : 0;
+ ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
+ if (ret) {
+ priv->shp_cfg_speed = prev_speed;
+ prev_speed = 0;
+ }
+
+ if (bw && prev_speed)
+ dev_warn(priv->dev,
+ "Speed was changed, CBS shaper speeds are changed!");
+
+ pm_runtime_put_sync(cpsw->dev);
+ return ret;
+}
+
+static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int fifo, num_tc, count, offset;
+ struct cpsw_slave *slave;
+ u32 tx_prio_map = 0;
+ int i, tc, ret;
+
+ num_tc = mqprio->qopt.num_tc;
+ if (num_tc > CPSW_TC_NUM)
+ return -EINVAL;
+
+ if (mqprio->mode != TC_MQPRIO_MODE_DCB)
+ return -EINVAL;
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ if (num_tc) {
+ for (i = 0; i < 8; i++) {
+ tc = mqprio->qopt.prio_tc_map[i];
+ fifo = cpsw_tc_to_fifo(tc, num_tc);
+ tx_prio_map |= fifo << (4 * i);
+ }
+
+ netdev_set_num_tc(ndev, num_tc);
+ for (i = 0; i < num_tc; i++) {
+ count = mqprio->qopt.count[i];
+ offset = mqprio->qopt.offset[i];
+ netdev_set_tc_queue(ndev, i, count, offset);
+ }
+ }
+
+ if (!mqprio->qopt.hw) {
+ /* restore default configuration */
+ netdev_reset_tc(ndev);
+ tx_prio_map = TX_PRIORITY_MAPPING;
+ }
+
+ priv->mqprio_hw = mqprio->qopt.hw;
+
+ offset = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ slave_write(slave, tx_prio_map, offset);
+
+ pm_runtime_put_sync(cpsw->dev);
+
+ return 0;
+}
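
A standalone demo of the tx_prio_map packing above: each of the eight
priorities gets a 4-bit FIFO number at bits [4*i+3:4*i]. The
prio_tc_map values are illustrative, and the shaper count is assumed
as before:

#include <stdint.h>
#include <stdio.h>

#define FIFO_SHAPERS_NUM 3 /* assumed, as in cpsw_priv.h */

static int tc_to_fifo(int tc, int num_tc)
{
	return tc == num_tc - 1 ? 0 : FIFO_SHAPERS_NUM - tc;
}

int main(void)
{
	int prio_tc_map[8] = { 2, 2, 1, 1, 0, 0, 0, 0 }, num_tc = 3, i;
	uint32_t tx_prio_map = 0;

	for (i = 0; i < 8; i++)
		tx_prio_map |= tc_to_fifo(prio_tc_map[i], num_tc) << (4 * i);

	printf("tx_prio_map = %#010x\n", tx_prio_map); /* 0x33332200 */
	return 0;
}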
+
+int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return cpsw_set_cbs(ndev, type_data);
+
+ case TC_SETUP_QDISC_MQPRIO:
+ return cpsw_set_mqprio(ndev, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ int fifo, bw;
+
+ for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
+ bw = priv->fifo_bw[fifo];
+ if (!bw)
+ continue;
+
+ cpsw_set_fifo_rlimit(priv, fifo, bw);
+ }
+}
+
+void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 tx_prio_map = 0;
+ int i, tc, fifo;
+ u32 tx_prio_rg;
+
+ if (!priv->mqprio_hw)
+ return;
+
+ for (i = 0; i < 8; i++) {
+ tc = netdev_get_prio_tc_map(priv->ndev, i);
+ fifo = CPSW_FIFO_SHAPERS_NUM - tc;
+ tx_prio_map |= fifo << (4 * i);
+ }
+
+ tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+ slave_write(slave, tx_prio_map, tx_prio_rg);
+}
+
+int cpsw_fill_rx_channels(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_meta_xdp *xmeta;
+ struct page_pool *pool;
+ struct page *page;
+ int ch_buf_num;
+ int ch, i, ret;
+ dma_addr_t dma;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ pool = cpsw->page_pool[ch];
+ ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
+ for (i = 0; i < ch_buf_num; i++) {
+ page = page_pool_dev_alloc_pages(pool);
+ if (!page) {
+ cpsw_err(priv, ifup, "allocate rx page err\n");
+ return -ENOMEM;
+ }
+
+ xmeta = page_address(page) + CPSW_XMETA_OFFSET;
+ xmeta->ndev = priv->ndev;
+ xmeta->ch = ch;
+
+ dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
+ ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
+ page, dma,
+ cpsw->rx_packet_max,
+ 0);
+ if (ret < 0) {
+ cpsw_err(priv, ifup,
+ "cannot submit page to channel %d rx, error %d\n",
+ ch, ret);
+ page_pool_recycle_direct(pool, page);
+ return ret;
+ }
+ }
+
+ cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
+ ch, ch_buf_num);
+ }
+
+ return 0;
+}
+
+static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
+ int size)
+{
+ struct page_pool_params pp_params;
+ struct page_pool *pool;
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = size;
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.dev = cpsw->dev;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ dev_err(cpsw->dev, "cannot create rx page pool\n");
+
+ return pool;
+}
+
+static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
+{
+ struct page_pool *pool;
+ int ret = 0, pool_size;
+
+ pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
+ pool = cpsw_create_page_pool(cpsw, pool_size);
+ if (IS_ERR(pool))
+ ret = PTR_ERR(pool);
+ else
+ cpsw->page_pool[ch] = pool;
+
+ return ret;
+}
+
+static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct xdp_rxq_info *rxq;
+ struct page_pool *pool;
+ int ret;
+
+ pool = cpsw->page_pool[ch];
+ rxq = &priv->xdp_rxq[ch];
+
+ ret = xdp_rxq_info_reg(rxq, priv->ndev, ch);
+ if (ret)
+ return ret;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ xdp_rxq_info_unreg(rxq);
+
+ return ret;
+}
+
+static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
+{
+ struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];
+
+ if (!xdp_rxq_info_is_reg(rxq))
+ return;
+
+ xdp_rxq_info_unreg(rxq);
+}
+
+void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
+{
+ struct net_device *ndev;
+ int i, ch;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (!ndev)
+ continue;
+
+ cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
+ }
+
+ page_pool_destroy(cpsw->page_pool[ch]);
+ cpsw->page_pool[ch] = NULL;
+ }
+}
+
+int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
+{
+ struct net_device *ndev;
+ int i, ch, ret;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ ret = cpsw_create_rx_pool(cpsw, ch);
+ if (ret)
+ goto err_cleanup;
+
+ /* Using the same page pool is allowed as the rx handlers never
+ * run simultaneously for both ndevs
+ */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (!ndev)
+ continue;
+
+ ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
+ if (ret)
+ goto err_cleanup;
+ }
+ }
+
+ return 0;
+
+err_cleanup:
+ cpsw_destroy_xdp_rxqs(cpsw);
+
+ return ret;
+}
+
+static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog;
+
+ if (!priv->xdpi.prog && !prog)
+ return 0;
+
+ if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
+ return -EBUSY;
+
+ WRITE_ONCE(priv->xdp_prog, prog);
+
+ xdp_attachment_setup(&priv->xdpi, bpf);
+
+ return 0;
+}
+
+int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return cpsw_xdp_prog_setup(priv, bpf);
+
+ case XDP_QUERY_PROG:
+ return xdp_attachment_query(&priv->xdpi, bpf);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
+ struct page *page, int port)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_meta_xdp *xmeta;
+ struct cpdma_chan *txch;
+ dma_addr_t dma;
+ int ret;
+
+ xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
+ xmeta->ndev = priv->ndev;
+ xmeta->ch = 0;
+ txch = cpsw->txv[0].ch;
+
+ if (page) {
+ dma = page_pool_get_dma_addr(page);
+ dma += xdpf->headroom + sizeof(struct xdp_frame);
+ ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
+ dma, xdpf->len, port);
+ } else {
+ if (sizeof(*xmeta) > xdpf->headroom) {
+ xdp_return_frame_rx_napi(xdpf);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
+ xdpf->data, xdpf->len, port);
+ }
+
+ if (ret) {
+ priv->ndev->stats.tx_dropped++;
+ xdp_return_frame_rx_napi(xdpf);
+ }
+
+ return ret;
+}
+
+int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
+ struct page *page, int port)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct net_device *ndev = priv->ndev;
+ int ret = CPSW_XDP_CONSUMED;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ u32 act;
+
+ rcu_read_lock();
+
+ prog = READ_ONCE(priv->xdp_prog);
+ if (!prog) {
+ ret = CPSW_XDP_PASS;
+ goto out;
+ }
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ ret = CPSW_XDP_PASS;
+ break;
+ case XDP_TX:
+ xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpf))
+ goto drop;
+
+ cpsw_xdp_tx_frame(priv, xdpf, page, port);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(ndev, xdp, prog))
+ goto drop;
+
+ /* Have to flush here, per packet, instead of doing it in bulk
+ * at the end of the napi handler. The RX devices on this
+ * particular hardware share a common queue, so the incoming
+ * device might change per packet.
+ */
+ xdp_do_flush_map();
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fall through */
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ /* fall through -- handle aborts by dropping packet */
+ case XDP_DROP:
+ goto drop;
+ }
+out:
+ rcu_read_unlock();
+ return ret;
+drop:
+ rcu_read_unlock();
+ page_pool_recycle_direct(cpsw->page_pool[ch], page);
return ret;
}
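For context, the return codes handled in the switch above come from an attached XDP program. A minimal sketch of such a program (libbpf-style restricted C, built separately with clang -target bpf; the program and section names are illustrative) might look like this:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_all(struct xdp_md *ctx)
{
	/* XDP_DROP makes cpsw_run_xdp() recycle the page back to its pool */
	return XDP_DROP;
}

char _license[] SEC("license") = "GPL";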
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 362c5a986869..bc726356a72c 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -54,6 +54,7 @@ do { \
#define HOST_PORT_NUM 0
#define CPSW_ALE_PORTS_NUM 3
+#define CPSW_SLAVE_PORTS_NUM 2
#define SLIVER_SIZE 0x40
#define CPSW1_HOST_PORT_OFFSET 0x028
@@ -65,6 +66,7 @@ do { \
#define CPSW1_CPTS_OFFSET 0x500
#define CPSW1_ALE_OFFSET 0x600
#define CPSW1_SLIVER_OFFSET 0x700
+#define CPSW1_WR_OFFSET 0x900
#define CPSW2_HOST_PORT_OFFSET 0x108
#define CPSW2_SLAVE_OFFSET 0x200
@@ -76,6 +78,7 @@ do { \
#define CPSW2_ALE_OFFSET 0xd00
#define CPSW2_SLIVER_OFFSET 0xd80
#define CPSW2_BD_OFFSET 0x2000
+#define CPSW2_WR_OFFSET 0x1200
#define CPDMA_RXTHRESH 0x0c0
#define CPDMA_RXFREE 0x0e0
@@ -113,12 +116,15 @@ do { \
#define IRQ_NUM 2
#define CPSW_MAX_QUEUES 8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
+#define CPSW_ALE_AGEOUT_DEFAULT 10 /* sec */
+#define CPSW_ALE_NUM_ENTRIES 1024
#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16
#define CPSW_FIFO_SHAPE_EN_SHIFT 16
#define CPSW_FIFO_RATE_EN_SHIFT 20
#define CPSW_TC_NUM 4
#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1)
#define CPSW_PCT_MASK 0x7f
+#define CPSW_BD_RAM_SIZE 0x2000
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0)
@@ -275,10 +281,11 @@ struct cpsw_slave_data {
struct device_node *slave_node;
struct device_node *phy_node;
char phy_id[MII_BUS_ID_SIZE];
- int phy_if;
+ phy_interface_t phy_if;
u8 mac_addr[ETH_ALEN];
u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
struct phy *ifphy;
+ bool disabled;
};
struct cpsw_platform_data {
@@ -286,9 +293,9 @@ struct cpsw_platform_data {
u32 ss_reg_ofs; /* Subsystem control register offset */
u32 channels; /* number of cpdma channels (symmetric) */
u32 slaves; /* number of slave cpgmac ports */
- u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
+ u32 active_slave;/* time stamping, ethtool and SIOCGMIIPHY slave */
u32 ale_entries; /* ale table size */
- u32 bd_ram_size; /*buffer descriptor ram size */
+ u32 bd_ram_size; /*buffer descriptor ram size */
u32 mac_control; /* Mac control register */
u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
bool dual_emac; /* Enable Dual EMAC mode */
@@ -344,10 +351,15 @@ struct cpsw_common {
bool tx_irq_disabled;
u32 irqs_table[IRQ_NUM];
struct cpts *cpts;
+ struct devlink *devlink;
int rx_ch_num, tx_ch_num;
int speed;
int usage_count;
struct page_pool *page_pool[CPSW_MAX_QUEUES];
+ u8 br_members;
+ struct net_device *hw_bridge_dev;
+ bool ale_bypass;
+ u8 base_mac[ETH_ALEN];
};
struct cpsw_priv {
@@ -368,19 +380,14 @@ struct cpsw_priv {
u32 emac_port;
struct cpsw_common *cpsw;
+ int offload_fwd_mark;
};
#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
#define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi)
-#define cpsw_slave_index(cpsw, priv) \
- ((cpsw->data.dual_emac) ? priv->emac_port : \
- cpsw->data.active_slave)
-
-static inline int cpsw_get_slave_port(u32 slave_num)
-{
- return slave_num + 1;
-}
+extern int (*cpsw_slave_index)(struct cpsw_common *cpsw,
+ struct cpsw_priv *priv);
struct addr_sync_ctx {
struct net_device *ndev;
@@ -389,6 +396,35 @@ struct addr_sync_ctx {
int flush; /* flush flag */
};
+#define CPSW_XMETA_OFFSET ALIGN(sizeof(struct xdp_frame), sizeof(long))
+
+#define CPSW_XDP_CONSUMED 1
+#define CPSW_XDP_PASS 0
+
+struct __aligned(sizeof(long)) cpsw_meta_xdp {
+ struct net_device *ndev;
+ int ch;
+};
+
+/* The buffer includes headroom compatible with both skb and xdpf */
+#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
+#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long))
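A worked example of the headroom arithmetic, assuming the common values XDP_PACKET_HEADROOM = 256, NET_SKB_PAD = 32 and NET_IP_ALIGN = 2 (all three are architecture/config dependent, so the numbers below are illustrative only):

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	int xdp_headroom = 256;	/* XDP_PACKET_HEADROOM, typical */
	int skb_pad = 32;	/* NET_SKB_PAD, config dependent */
	int ip_align = 2;	/* NET_IP_ALIGN on most architectures */
	int na = (xdp_headroom > skb_pad ? xdp_headroom : skb_pad) + ip_align;

	printf("CPSW_HEADROOM_NA = %d\n", na);			/* 258 */
	printf("CPSW_HEADROOM    = %d\n", ALIGN_UP(na, 8));	/* 264 with 8-byte longs */
	return 0;
}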
+
+static inline int cpsw_is_xdpf_handle(void *handle)
+{
+ return (unsigned long)handle & BIT(0);
+}
+
+static inline void *cpsw_xdpf_to_handle(struct xdp_frame *xdpf)
+{
+ return (void *)((unsigned long)xdpf | BIT(0));
+}
+
+static inline struct xdp_frame *cpsw_handle_to_xdpf(void *handle)
+{
+ return (struct xdp_frame *)((unsigned long)handle & ~BIT(0));
+}
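A self-contained user-space sketch of the pointer-tagging trick used by the three helpers above: because struct xdp_frame is at least long-aligned, bit 0 of its address is always clear and can carry a "this token is an xdp_frame" flag through the opaque CPDMA token. The types and names below are stand-ins, not kernel APIs.

#include <assert.h>
#include <stdint.h>

struct frame { long payload; };	/* stand-in for struct xdp_frame, long-aligned */

static void *to_handle(struct frame *f)
{
	return (void *)((uintptr_t)f | 1UL);	/* set the tag bit */
}

static int is_frame_handle(void *h)
{
	return (uintptr_t)h & 1UL;		/* tag bit set? */
}

static struct frame *to_frame(void *h)
{
	return (struct frame *)((uintptr_t)h & ~1UL);	/* clear the tag bit */
}

int main(void)
{
	struct frame f;
	void *h = to_handle(&f);

	assert(is_frame_handle(h));
	assert(to_frame(h) == &f);
	return 0;
}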
+
int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
int ale_ageout, phys_addr_t desc_mem_phys,
int descs_pool_size);
@@ -399,6 +435,29 @@ void cpsw_intr_disable(struct cpsw_common *cpsw);
void cpsw_tx_handler(void *token, int len, int status);
int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw);
void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw);
+int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
+int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
+ struct page *page, int port);
+int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
+ struct page *page, int port);
+irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id);
+irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id);
+int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget);
+int cpsw_tx_poll(struct napi_struct *napi_tx, int budget);
+int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget);
+int cpsw_rx_poll(struct napi_struct *napi_rx, int budget);
+void cpsw_rx_vlan_encap(struct sk_buff *skb);
+void soft_reset(const char *module, void __iomem *reg);
+void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv);
+void cpsw_ndo_tx_timeout(struct net_device *ndev);
+int cpsw_need_resplit(struct cpsw_common *cpsw);
+int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd);
+int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate);
+int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+bool cpsw_shp_is_off(struct cpsw_priv *priv);
+void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
+void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
/* ethtool */
u32 cpsw_get_msglevel(struct net_device *ndev);
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
new file mode 100644
index 000000000000..985a929bb957
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -0,0 +1,589 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments switchdev Driver
+ *
+ * Copyright (C) 2019 Texas Instruments
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <net/switchdev.h>
+
+#include "cpsw.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "cpsw_switchdev.h"
+
+struct cpsw_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct cpsw_priv *priv;
+ unsigned long event;
+};
+
+static int cpsw_port_stp_state_set(struct cpsw_priv *priv,
+ struct switchdev_trans *trans, u8 state)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u8 cpsw_state;
+ int ret = 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ cpsw_state = ALE_PORT_STATE_FORWARD;
+ break;
+ case BR_STATE_LEARNING:
+ cpsw_state = ALE_PORT_STATE_LEARN;
+ break;
+ case BR_STATE_DISABLED:
+ cpsw_state = ALE_PORT_STATE_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ cpsw_state = ALE_PORT_STATE_BLOCK;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_STATE, cpsw_state);
+ dev_dbg(priv->dev, "ale state: %u\n", cpsw_state);
+
+ return ret;
+}
+
+static int cpsw_port_attr_br_flags_set(struct cpsw_priv *priv,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ unsigned long brport_flags)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ bool unreg_mcast_add = false;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (brport_flags & BR_MCAST_FLOOD)
+ unreg_mcast_add = true;
+ dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n",
+ unreg_mcast_add, priv->emac_port);
+
+ cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(priv->emac_port),
+ unreg_mcast_add);
+
+ return 0;
+}
+
+static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
+ struct switchdev_trans *trans,
+ unsigned long flags)
+{
+ if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cpsw_port_attr_set(struct net_device *ndev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ dev_dbg(priv->dev, "attr: id %u port: %u\n", attr->id, priv->emac_port);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ ret = cpsw_port_attr_br_flags_pre_set(ndev, trans,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ret = cpsw_port_stp_state_set(priv, trans, attr->u.stp_state);
+ dev_dbg(priv->dev, "stp state: %u\n", attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ret = cpsw_port_attr_br_flags_set(priv, trans, attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static u16 cpsw_get_pvid(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 __iomem *port_vlan_reg;
+ u32 pvid;
+
+ if (priv->emac_port) {
+ int reg = CPSW2_PORT_VLAN;
+
+ if (cpsw->version == CPSW_VERSION_1)
+ reg = CPSW1_PORT_VLAN;
+ pvid = slave_read(cpsw->slaves + (priv->emac_port - 1), reg);
+ } else {
+ port_vlan_reg = &cpsw->host_port_regs->port_vlan;
+ pvid = readl(port_vlan_reg);
+ }
+
+ pvid = pvid & 0xfff;
+
+ return pvid;
+}
+
+static void cpsw_set_pvid(struct cpsw_priv *priv, u16 vid, bool cfi, u32 cos)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ void __iomem *port_vlan_reg;
+ u32 pvid;
+
+ pvid = vid;
+ pvid |= cfi ? BIT(12) : 0;
+ pvid |= (cos & 0x7) << 13;
+
+ if (priv->emac_port) {
+ int reg = CPSW2_PORT_VLAN;
+
+ if (cpsw->version == CPSW_VERSION_1)
+ reg = CPSW1_PORT_VLAN;
+ /* no barrier */
+ slave_write(cpsw->slaves + (priv->emac_port - 1), pvid, reg);
+ } else {
+ /* CPU port */
+ port_vlan_reg = &cpsw->host_port_regs->port_vlan;
+ writel(pvid, port_vlan_reg);
+ }
+}
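A worked example of the port VLAN register layout assembled above (bits 0-11 VID, bit 12 CFI, bits 13-15 CoS); the concrete values are made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t vid = 100, cfi = 0, cos = 5;
	uint32_t pvid = vid | (cfi ? 1u << 12 : 0) | ((cos & 0x7) << 13);

	printf("pvid = 0x%04x\n", pvid);	/* 0xa064 */
	printf("vid  = %u\n", pvid & 0xfff);	/* 100, as cpsw_get_pvid() extracts */
	return 0;
}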
+
+static int cpsw_port_vlan_add(struct cpsw_priv *priv, bool untag, bool pvid,
+ u16 vid, struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int unreg_mcast_mask = 0;
+ int reg_mcast_mask = 0;
+ int untag_mask = 0;
+ int port_mask;
+ int ret = 0;
+ u32 flags;
+
+ if (cpu_port) {
+ port_mask = BIT(HOST_PORT_NUM);
+ flags = orig_dev->flags;
+ unreg_mcast_mask = port_mask;
+ } else {
+ port_mask = BIT(priv->emac_port);
+ flags = priv->ndev->flags;
+ }
+
+ if (flags & IFF_MULTICAST)
+ reg_mcast_mask = port_mask;
+
+ if (untag)
+ untag_mask = port_mask;
+
+ ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask,
+ reg_mcast_mask, unreg_mcast_mask);
+ if (ret) {
+ dev_err(priv->dev, "Unable to add vlan\n");
+ return ret;
+ }
+
+ if (cpu_port)
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ if (!pvid)
+ return ret;
+
+ cpsw_set_pvid(priv, vid, 0, 0);
+
+ dev_dbg(priv->dev, "VID add: %s: vid:%u ports:%X\n",
+ priv->ndev->name, vid, port_mask);
+ return ret;
+}
+
+static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid,
+ struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int port_mask;
+ int ret = 0;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(priv->emac_port);
+
+ ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
+ if (ret != 0)
+ return ret;
+
+ /* We don't care about the return value here; an error is returned
+ * only if the unicast entry is not present
+ */
+ if (cpu_port)
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+
+ if (vid == cpsw_get_pvid(priv))
+ cpsw_set_pvid(priv, 0, 0, 0);
+
+ /* We don't care about the return value here; an error is returned
+ * only if the multicast entry is not present
+ */
+ cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+ port_mask, ALE_VLAN, vid);
+ dev_dbg(priv->dev, "VID del: %s: vid:%u ports:%X\n",
+ priv->ndev->name, vid, port_mask);
+
+ return ret;
+}
+
+static int cpsw_port_vlans_add(struct cpsw_priv *priv,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ u16 vid;
+
+ dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n",
+ priv->ndev->name, vlan->vid_begin, vlan->flags);
+
+ if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
+
+ err = cpsw_port_vlan_add(priv, untag, pvid, vid, orig_dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cpsw_port_vlans_del(struct cpsw_priv *priv,
+ const struct switchdev_obj_port_vlan *vlan)
+
+{
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
+
+ err = cpsw_port_vlan_del(priv, vid, orig_dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cpsw_port_mdb_add(struct cpsw_priv *priv,
+ struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int port_mask;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(priv->emac_port);
+
+ err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask,
+ ALE_VLAN, mdb->vid, 0);
+ dev_dbg(priv->dev, "MDB add: %s: vid %u:%pM ports: %X\n",
+ priv->ndev->name, mdb->vid, mdb->addr, port_mask);
+
+ return err;
+}
+
+static int cpsw_port_mdb_del(struct cpsw_priv *priv,
+ struct switchdev_obj_port_mdb *mdb)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int del_mask;
+ int err;
+
+ if (cpu_port)
+ del_mask = BIT(HOST_PORT_NUM);
+ else
+ del_mask = BIT(priv->emac_port);
+
+ err = cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask,
+ ALE_VLAN, mdb->vid);
+ dev_dbg(priv->dev, "MDB del: %s: vid %u:%pM ports: %X\n",
+ priv->ndev->name, mdb->vid, mdb->addr, del_mask);
+
+ return err;
+}
+
+static int cpsw_port_obj_add(struct net_device *ndev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err = 0;
+
+ dev_dbg(priv->dev, "obj_add: id %u port: %u\n",
+ obj->id, priv->emac_port);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = cpsw_port_vlans_add(priv, vlan, trans);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = cpsw_port_mdb_add(priv, mdb, trans);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int cpsw_port_obj_del(struct net_device *ndev,
+ const struct switchdev_obj *obj)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err = 0;
+
+ dev_dbg(priv->dev, "obj_del: id %u port: %u\n",
+ obj->id, priv->emac_port);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = cpsw_port_vlans_del(priv, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = cpsw_port_mdb_del(priv, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static void cpsw_fdb_offload_notify(struct net_device *ndev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ ndev, &info.info, NULL);
+}
+
+static void cpsw_switchdev_event_work(struct work_struct *work)
+{
+ struct cpsw_switchdev_event_work *switchdev_work =
+ container_of(work, struct cpsw_switchdev_event_work, work);
+ struct cpsw_priv *priv = switchdev_work->priv;
+ struct switchdev_notifier_fdb_info *fdb;
+ struct cpsw_common *cpsw = priv->cpsw;
+ int port = priv->emac_port;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ dev_dbg(cpsw->dev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port);
+
+ if (!fdb->added_by_user)
+ break;
+ if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port = HOST_PORT_NUM;
+
+ cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ cpsw_fdb_offload_notify(priv->ndev, fdb);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ dev_dbg(cpsw->dev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port);
+
+ if (!fdb->added_by_user)
+ break;
+ if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port = HOST_PORT_NUM;
+
+ cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ break;
+ default:
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(priv->ndev);
+}
+
+/* called under rcu_read_lock() */
+static int cpsw_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct cpsw_switchdev_event_work *switchdev_work;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(ndev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, cpsw_switchdev_event_work);
+ switchdev_work->priv = priv;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(ndev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block cpsw_switchdev_notifier = {
+ .notifier_call = cpsw_switchdev_event,
+};
+
+static int cpsw_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpsw_switchdev_bl_notifier = {
+ .notifier_call = cpsw_switchdev_blocking_event,
+};
+
+int cpsw_switchdev_register_notifiers(struct cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ ret = register_switchdev_notifier(&cpsw_switchdev_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "register switchdev notifier fail ret:%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "register switchdev blocking notifier ret:%d\n",
+ ret);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+ }
+
+ return ret;
+}
+
+void cpsw_switchdev_unregister_notifiers(struct cpsw_common *cpsw)
+{
+ unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+}
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.h b/drivers/net/ethernet/ti/cpsw_switchdev.h
new file mode 100644
index 000000000000..04a045dba7d4
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Texas Instruments Ethernet Switch Driver
+ */
+
+#ifndef DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_
+#define DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_
+
+#include <net/switchdev.h>
+
+bool cpsw_port_dev_check(const struct net_device *dev);
+int cpsw_switchdev_register_notifiers(struct cpsw_common *cpsw);
+void cpsw_switchdev_unregister_notifiers(struct cpsw_common *cpsw);
+
+#endif /* DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_ */
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 61136428e2c0..729ce09dded9 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -459,7 +459,7 @@ int cpts_register(struct cpts *cpts)
cpts_write32(cpts, CPTS_EN, control);
cpts_write32(cpts, TS_PEND_EN, int_enable);
- timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));
+ timecounter_init(&cpts->tc, &cpts->cc, ktime_get_real_ns());
cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
if (IS_ERR(cpts->clock)) {
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 2c1fac33136c..86a3f42a3dcc 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2291,6 +2291,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
struct gbe_slave *slave = gbe_intf->slave;
phy_interface_t phy_mode;
bool has_phy = false;
+ int err;
void (*hndlr)(struct net_device *) = gbe_adjust_link;
@@ -2320,11 +2321,11 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
slave->phy_port_t = PORT_MII;
} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
has_phy = true;
- phy_mode = of_get_phy_mode(slave->node);
+ err = of_get_phy_mode(slave->node, &phy_mode);
/* if phy-mode is not present, default to
* PHY_INTERFACE_MODE_RGMII
*/
- if (phy_mode < 0)
+ if (err)
phy_mode = PHY_INTERFACE_MODE_RGMII;
if (!phy_interface_mode_is_rgmii(phy_mode)) {
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 8d994cebb6b0..6304ebd8b5c6 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -6,7 +6,6 @@
config NET_VENDOR_XILINX
bool "Xilinx devices"
default y
- depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || ARM || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -26,11 +25,10 @@ config XILINX_EMACLITE
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
- depends on MICROBLAZE || X86 || ARM || COMPILE_TEST
select PHYLINK
---help---
This driver supports the 10/100/1000 Ethernet from Xilinx for the
- AXI bus interface used in Xilinx Virtex FPGAs.
+ AXI bus interface used in Xilinx Virtex FPGAs and SoCs.
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 676006f32f91..8f32db6d2c45 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1761,11 +1761,9 @@ static int axienet_probe(struct platform_device *pdev)
goto free_netdev;
}
} else {
- lp->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if ((int)lp->phy_mode < 0) {
- ret = -EINVAL;
+ ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
+ if (ret)
goto free_netdev;
- }
}
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
@@ -1790,10 +1788,6 @@ static int axienet_probe(struct platform_device *pdev)
/* Check for these resources directly on the Ethernet node. */
struct resource *res = platform_get_resource(pdev,
IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "unable to get DMA memory resource\n");
- goto free_netdev;
- }
lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
lp->rx_irq = platform_get_irq(pdev, 1);
lp->tx_irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 670ef682f268..4209d1cf57f6 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -853,6 +853,7 @@ struct multi_recv_comp {
struct nvsc_rsc {
const struct ndis_pkt_8021q_info *vlan;
const struct ndis_tcp_ip_checksum_info *csum_info;
+ const u32 *hash_info;
u8 is_last; /* last RNDIS msg in a vmtransfer_page */
u32 cnt; /* #fragments in an RSC packet */
u32 pktlen; /* Full packet length */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 963509add611..5fa5c49e481b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -285,9 +285,9 @@ static inline u32 netvsc_get_hash(
else if (flow.basic.n_proto == htons(ETH_P_IPV6))
hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
else
- hash = 0;
+ return 0;
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
+ __skb_set_sw_hash(skb, hash, false);
}
return hash;
@@ -766,6 +766,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
const struct ndis_tcp_ip_checksum_info *csum_info =
nvchan->rsc.csum_info;
+ const u32 *hash_info = nvchan->rsc.hash_info;
struct sk_buff *skb;
int i;
@@ -795,14 +796,16 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
skb->protocol == htons(ETH_P_IP))
netvsc_comp_ipcsum(skb);
- /* Do L4 checksum offload if enabled and present.
- */
+ /* Do L4 checksum offload if enabled and present. */
if (csum_info && (net->features & NETIF_F_RXCSUM)) {
if (csum_info->receive.tcp_checksum_succeeded ||
csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
+ if (hash_info)
+ skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);
+
if (vlan) {
u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
(vlan->cfi ? VLAN_CFI_MASK : 0);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index abaf8156d19d..c06178380ac8 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -358,6 +358,7 @@ static inline
void rsc_add_data(struct netvsc_channel *nvchan,
const struct ndis_pkt_8021q_info *vlan,
const struct ndis_tcp_ip_checksum_info *csum_info,
+ const u32 *hash_info,
void *data, u32 len)
{
u32 cnt = nvchan->rsc.cnt;
@@ -368,6 +369,7 @@ void rsc_add_data(struct netvsc_channel *nvchan,
nvchan->rsc.vlan = vlan;
nvchan->rsc.csum_info = csum_info;
nvchan->rsc.pktlen = len;
+ nvchan->rsc.hash_info = hash_info;
}
nvchan->rsc.data[cnt] = data;
@@ -385,6 +387,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
const struct ndis_tcp_ip_checksum_info *csum_info;
const struct ndis_pkt_8021q_info *vlan;
const struct rndis_pktinfo_id *pktinfo_id;
+ const u32 *hash_info;
u32 data_offset;
void *data;
bool rsc_more = false;
@@ -411,6 +414,8 @@ static int rndis_filter_receive_data(struct net_device *ndev,
csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO, 0);
+ hash_info = rndis_get_ppi(rndis_pkt, NBL_HASH_VALUE, 0);
+
pktinfo_id = rndis_get_ppi(rndis_pkt, RNDIS_PKTINFO_ID, 1);
data = (void *)msg + data_offset;
@@ -441,7 +446,8 @@ static int rndis_filter_receive_data(struct net_device *ndev,
* rndis_pkt->data_len tell us the real data length, we only copy
* the data packet to the stack, without the rndis trailer padding
*/
- rsc_add_data(nvchan, vlan, csum_info, data, rndis_pkt->data_len);
+ rsc_add_data(nvchan, vlan, csum_info, hash_info,
+ data, rndis_pkt->data_len);
if (rsc_more)
return NVSP_STAT_SUCCESS;
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 43506948e444..89c046b204e0 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -218,7 +218,6 @@ static int
cc2520_cmd_strobe(struct cc2520_private *priv, u8 cmd)
{
int ret;
- u8 status = 0xff;
struct spi_message msg;
struct spi_transfer xfer = {
.len = 0,
@@ -236,8 +235,6 @@ cc2520_cmd_strobe(struct cc2520_private *priv, u8 cmd)
priv->buf[0]);
ret = spi_sync(priv->spi, &msg);
- if (!ret)
- status = priv->buf[0];
dev_vdbg(&priv->spi->dev,
"buf[0] = %02x\n", priv->buf[0]);
mutex_unlock(&priv->buffer_mutex);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 14545a8797a8..a1c77cc00416 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -68,7 +68,6 @@ EXPORT_SYMBOL(blackhole_netdev);
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct pcpu_lstats *lb_stats;
int len;
skb_tx_timestamp(skb);
@@ -85,27 +84,20 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb->protocol = eth_type_trans(skb, dev);
- /* it's OK to use per_cpu_ptr() because BHs are off */
- lb_stats = this_cpu_ptr(dev->lstats);
-
len = skb->len;
- if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
- u64_stats_update_begin(&lb_stats->syncp);
- lb_stats->bytes += len;
- lb_stats->packets++;
- u64_stats_update_end(&lb_stats->syncp);
- }
+ if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+ dev_lstats_add(dev, len);
return NETDEV_TX_OK;
}
-static void loopback_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes)
{
- u64 bytes = 0;
- u64 packets = 0;
int i;
+ *packets = 0;
+ *bytes = 0;
+
for_each_possible_cpu(i) {
const struct pcpu_lstats *lb_stats;
u64 tbytes, tpackets;
@@ -114,12 +106,22 @@ static void loopback_get_stats64(struct net_device *dev,
lb_stats = per_cpu_ptr(dev->lstats, i);
do {
start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
- tbytes = lb_stats->bytes;
- tpackets = lb_stats->packets;
+ tpackets = u64_stats_read(&lb_stats->packets);
+ tbytes = u64_stats_read(&lb_stats->bytes);
} while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
- bytes += tbytes;
- packets += tpackets;
+ *bytes += tbytes;
+ *packets += tpackets;
}
+}
+EXPORT_SYMBOL(dev_lstats_read);
+
+static void loopback_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 packets, bytes;
+
+ dev_lstats_read(dev, &packets, &bytes);
+
stats->rx_packets = packets;
stats->tx_packets = packets;
stats->rx_bytes = bytes;
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index e59a8826f36d..059711edfc61 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -753,7 +753,7 @@ err_fib_destroy:
return err;
}
-static struct nsim_dev *nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev)
+int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
{
struct nsim_dev *nsim_dev;
struct devlink *devlink;
@@ -761,7 +761,7 @@ static struct nsim_dev *nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev)
devlink = devlink_alloc(&nsim_dev_devlink_ops, sizeof(*nsim_dev));
if (!devlink)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
devlink_net_set(devlink, nsim_bus_dev->initial_net);
nsim_dev = devlink_priv(devlink);
nsim_dev->nsim_bus_dev = nsim_bus_dev;
@@ -773,6 +773,8 @@ static struct nsim_dev *nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev)
nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
+ dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
+
err = nsim_dev_resources_register(devlink);
if (err)
goto err_devlink_free;
@@ -818,7 +820,8 @@ static struct nsim_dev *nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev)
goto err_bpf_dev_exit;
devlink_params_publish(devlink);
- return nsim_dev;
+ devlink_reload_enable(devlink);
+ return 0;
err_bpf_dev_exit:
nsim_bpf_dev_exit(nsim_dev);
@@ -841,7 +844,7 @@ err_resources_unregister:
devlink_resources_unregister(devlink, NULL);
err_devlink_free:
devlink_free(devlink);
- return ERR_PTR(err);
+ return err;
}
static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
@@ -858,10 +861,13 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
nsim_fib_destroy(devlink, nsim_dev->fib_data);
}
-static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
+void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
{
+ struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
struct devlink *devlink = priv_to_devlink(nsim_dev);
+ devlink_reload_disable(devlink);
+
nsim_dev_reload_destroy(nsim_dev);
nsim_bpf_dev_exit(nsim_dev);
@@ -873,40 +879,6 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
devlink_free(devlink);
}
-int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
-{
- struct nsim_dev *nsim_dev;
- int i;
- int err;
-
- nsim_dev = nsim_dev_create(nsim_bus_dev);
- if (IS_ERR(nsim_dev))
- return PTR_ERR(nsim_dev);
- dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
-
- mutex_lock(&nsim_dev->port_list_lock);
- for (i = 0; i < nsim_bus_dev->port_count; i++) {
- err = __nsim_dev_port_add(nsim_dev, i);
- if (err)
- goto err_port_del_all;
- }
- mutex_unlock(&nsim_dev->port_list_lock);
- return 0;
-
-err_port_del_all:
- mutex_unlock(&nsim_dev->port_list_lock);
- nsim_dev_port_del_all(nsim_dev);
- nsim_dev_destroy(nsim_dev);
- return err;
-}
-
-void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
-{
- struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
-
- nsim_dev_destroy(nsim_dev);
-}
-
static struct nsim_dev_port *
__nsim_dev_port_lookup(struct nsim_dev *nsim_dev, unsigned int port_index)
{
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index 2716235a0336..9aa637d162eb 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -82,20 +82,14 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
if (err)
return err;
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_binary");
- if (err)
- return err;
binary = kmalloc(binary_len, GFP_KERNEL);
if (!binary)
return -ENOMEM;
get_random_bytes(binary, binary_len);
- err = devlink_fmsg_binary_put(fmsg, binary, binary_len);
+ err = devlink_fmsg_binary_pair_put(fmsg, "test_binary", binary, binary_len);
kfree(binary);
if (err)
return err;
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
- if (err)
- return err;
err = devlink_fmsg_pair_nest_start(fmsg, "test_nest");
if (err)
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index 68771b2f351a..afb119f38325 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -9,13 +9,7 @@
static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int len = skb->len;
- struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->bytes += len;
- stats->packets++;
- u64_stats_update_end(&stats->syncp);
+ dev_lstats_add(dev, skb->len);
dev_kfree_skb(skb);
@@ -56,25 +50,9 @@ static int nlmon_close(struct net_device *dev)
static void
nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- int i;
- u64 bytes = 0, packets = 0;
-
- for_each_possible_cpu(i) {
- const struct pcpu_lstats *nl_stats;
- u64 tbytes, tpackets;
- unsigned int start;
-
- nl_stats = per_cpu_ptr(dev->lstats, i);
-
- do {
- start = u64_stats_fetch_begin_irq(&nl_stats->syncp);
- tbytes = nl_stats->bytes;
- tpackets = nl_stats->packets;
- } while (u64_stats_fetch_retry_irq(&nl_stats->syncp, start));
+ u64 packets, bytes;
- packets += tpackets;
- bytes += tbytes;
- }
+ dev_lstats_read(dev, &packets, &bytes);
stats->rx_packets = packets;
stats->tx_packets = 0;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index fe602648b99f..5848219005d7 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -282,11 +282,6 @@ config AX88796B_PHY
Currently supports the Asix Electronics PHY found in the X-Surf 100
AX88796B package.
-config AT803X_PHY
- tristate "AT803X PHYs"
- ---help---
- Currently supports the AT8030 and AT8035 model
-
config BCM63XX_PHY
tristate "Broadcom 63xx SOCs internal PHY"
depends on BCM63XX || COMPILE_TEST
@@ -364,6 +359,12 @@ config DP83867_PHY
---help---
Currently supports the DP83867 PHY.
+config DP83869_PHY
+ tristate "Texas Instruments DP83869 Gigabit PHY"
+ ---help---
+ Currently supports the DP83869 PHY. This PHY supports copper and
+ fiber connections.
+
config FIXED_PHY
tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB
@@ -444,6 +445,12 @@ config NXP_TJA11XX_PHY
---help---
Currently supports the NXP TJA1100 and TJA1101 PHY.
+config AT803X_PHY
+ tristate "Qualcomm Atheros AR803X PHYs"
+ depends on REGULATOR
+ help
+ Currently supports the AR8030, AR8031, AR8033 and AR8035 models.
+
config QSEMI_PHY
tristate "Quality Semiconductor PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index a03437e091f3..b433ec3bf9a6 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_DP83822_PHY) += dp83822.o
obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o
obj-$(CONFIG_DP83848_PHY) += dp83848.o
obj-$(CONFIG_DP83867_PHY) += dp83867.o
+obj-$(CONFIG_DP83869_PHY) += dp83869.o
obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 8e30db28fd7d..aee62610bade 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -2,7 +2,7 @@
/*
* drivers/net/phy/at803x.c
*
- * Driver for Atheros 803x PHY
+ * Driver for Qualcomm Atheros AR803x PHY
*
* Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
*/
@@ -13,7 +13,12 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/of_gpio.h>
+#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/consumer.h>
+#include <dt-bindings/net/qca-ar803x.h>
#define AT803X_SPECIFIC_STATUS 0x11
#define AT803X_SS_SPEED_MASK (3 << 14)
@@ -62,16 +67,62 @@
#define AT803X_DEBUG_REG_5 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+#define AT803X_DEBUG_REG_1F 0x1F
+#define AT803X_DEBUG_PLL_ON BIT(2)
+#define AT803X_DEBUG_RGMII_1V8 BIT(3)
+
+/* AT803x supports either the XTAL input pad, an internal PLL or the
+ * DSP as clock reference for the clock output pad. The XTAL reference
+ * is only used for the 25 MHz output; all other frequencies need the
+ * PLL. The DSP as a clock reference is used in synchronous Ethernet
+ * applications.
+ *
+ * By default the PLL is only enabled if there is a link. Otherwise
+ * the PHY will go into a low-power state and disable the PLL. You can
+ * set the PLL_ON bit (see debug register 0x1f) to keep the PLL always
+ * enabled.
+ */
+#define AT803X_MMD7_CLK25M 0x8016
+#define AT803X_CLK_OUT_MASK GENMASK(4, 2)
+#define AT803X_CLK_OUT_25MHZ_XTAL 0
+#define AT803X_CLK_OUT_25MHZ_DSP 1
+#define AT803X_CLK_OUT_50MHZ_PLL 2
+#define AT803X_CLK_OUT_50MHZ_DSP 3
+#define AT803X_CLK_OUT_62_5MHZ_PLL 4
+#define AT803X_CLK_OUT_62_5MHZ_DSP 5
+#define AT803X_CLK_OUT_125MHZ_PLL 6
+#define AT803X_CLK_OUT_125MHZ_DSP 7
+
+/* The AR8035 has another mask which is compatible with the AR8031/AR8033 mask
+ * but doesn't support choosing between XTAL/PLL and DSP.
+ */
+#define AT8035_CLK_OUT_MASK GENMASK(4, 3)
+
+#define AT803X_CLK_OUT_STRENGTH_MASK GENMASK(8, 7)
+#define AT803X_CLK_OUT_STRENGTH_FULL 0
+#define AT803X_CLK_OUT_STRENGTH_HALF 1
+#define AT803X_CLK_OUT_STRENGTH_QUARTER 2
+
#define ATH9331_PHY_ID 0x004dd041
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
#define ATH8035_PHY_ID 0x004dd072
#define AT803X_PHY_ID_MASK 0xffffffef
-MODULE_DESCRIPTION("Atheros 803x PHY driver");
+MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver");
MODULE_AUTHOR("Matus Ujhelyi");
MODULE_LICENSE("GPL");
+struct at803x_priv {
+ int flags;
+#define AT803X_KEEP_PLL_ENABLED BIT(0) /* don't turn off internal PLL */
+ u16 clk_25m_reg;
+ u16 clk_25m_mask;
+ struct regulator_dev *vddio_rdev;
+ struct regulator_dev *vddh_rdev;
+ struct regulator *vddio;
+};
+
struct at803x_context {
u16 bmcr;
u16 advertise;
@@ -237,6 +288,240 @@ static int at803x_resume(struct phy_device *phydev)
return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
}
+static int at803x_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+
+ if (selector)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_RGMII_1V8);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_RGMII_1V8, 0);
+}
+
+static int at803x_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+ int val;
+
+ val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
+ if (val < 0)
+ return val;
+
+ return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
+}
+
+static struct regulator_ops vddio_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = at803x_rgmii_reg_set_voltage_sel,
+ .get_voltage_sel = at803x_rgmii_reg_get_voltage_sel,
+};
+
+static const unsigned int vddio_voltage_table[] = {
+ 1500000,
+ 1800000,
+};
+
+static const struct regulator_desc vddio_desc = {
+ .name = "vddio",
+ .of_match = of_match_ptr("vddio-regulator"),
+ .n_voltages = ARRAY_SIZE(vddio_voltage_table),
+ .volt_table = vddio_voltage_table,
+ .ops = &vddio_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static struct regulator_ops vddh_regulator_ops = {
+};
+
+static const struct regulator_desc vddh_desc = {
+ .name = "vddh",
+ .of_match = of_match_ptr("vddh-regulator"),
+ .n_voltages = 1,
+ .fixed_uV = 2500000,
+ .ops = &vddh_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static int at8031_register_regulators(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct regulator_config config = { };
+
+ config.dev = dev;
+ config.driver_data = phydev;
+
+ priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
+ if (IS_ERR(priv->vddio_rdev)) {
+ phydev_err(phydev, "failed to register VDDIO regulator\n");
+ return PTR_ERR(priv->vddio_rdev);
+ }
+
+ priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
+ if (IS_ERR(priv->vddh_rdev)) {
+ phydev_err(phydev, "failed to register VDDH regulator\n");
+ return PTR_ERR(priv->vddh_rdev);
+ }
+
+ return 0;
+}
+
+static bool at803x_match_phy_id(struct phy_device *phydev, u32 phy_id)
+{
+ return (phydev->phy_id & phydev->drv->phy_id_mask)
+ == (phy_id & phydev->drv->phy_id_mask);
+}
+
+static int at803x_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct at803x_priv *priv = phydev->priv;
+ unsigned int sel, mask;
+ u32 freq, strength;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
+ if (!ret) {
+ mask = AT803X_CLK_OUT_MASK;
+ switch (freq) {
+ case 25000000:
+ sel = AT803X_CLK_OUT_25MHZ_XTAL;
+ break;
+ case 50000000:
+ sel = AT803X_CLK_OUT_50MHZ_PLL;
+ break;
+ case 62500000:
+ sel = AT803X_CLK_OUT_62_5MHZ_PLL;
+ break;
+ case 125000000:
+ sel = AT803X_CLK_OUT_125MHZ_PLL;
+ break;
+ default:
+ phydev_err(phydev, "invalid qca,clk-out-frequency\n");
+ return -EINVAL;
+ }
+
+ priv->clk_25m_reg |= FIELD_PREP(mask, sel);
+ priv->clk_25m_mask |= mask;
+
+ /* Fixup for the AR8030/AR8035. These chips have a different mask
+ * and don't support the DSP reference, i.e. the lowest bit of
+ * the mask. The upper two bits select the same frequencies, so
+ * mask out the lowest bit here.
+ *
+ * Warning:
+ * No datasheet for the AR8030 was available, so this is just a
+ * guess. But the AR8035 is listed as pin compatible with the
+ * AR8030, so there is a good chance it works on the AR8030 too.
+ */
+ if (at803x_match_phy_id(phydev, ATH8030_PHY_ID) ||
+ at803x_match_phy_id(phydev, ATH8035_PHY_ID)) {
+ priv->clk_25m_reg &= ~AT8035_CLK_OUT_MASK;
+ priv->clk_25m_mask &= ~AT8035_CLK_OUT_MASK;
+ }
+ }
+
+ ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
+ if (!ret) {
+ priv->clk_25m_mask |= AT803X_CLK_OUT_STRENGTH_MASK;
+ switch (strength) {
+ case AR803X_STRENGTH_FULL:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_FULL;
+ break;
+ case AR803X_STRENGTH_HALF:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_HALF;
+ break;
+ case AR803X_STRENGTH_QUARTER:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_QUARTER;
+ break;
+ default:
+ phydev_err(phydev, "invalid qca,clk-out-strength\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
+ * options.
+ */
+ if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) {
+ if (of_property_read_bool(node, "qca,keep-pll-enabled"))
+ priv->flags |= AT803X_KEEP_PLL_ENABLED;
+
+ ret = at8031_register_regulators(phydev);
+ if (ret < 0)
+ return ret;
+
+ priv->vddio = devm_regulator_get_optional(&phydev->mdio.dev,
+ "vddio");
+ if (IS_ERR(priv->vddio)) {
+ phydev_err(phydev, "failed to get VDDIO regulator\n");
+ return PTR_ERR(priv->vddio);
+ }
+
+ ret = regulator_enable(priv->vddio);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int at803x_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct at803x_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return at803x_parse_dt(phydev);
+}
+
+static int at803x_clk_out_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int val;
+
+ if (!priv->clk_25m_mask)
+ return 0;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M);
+ if (val < 0)
+ return val;
+
+ val &= ~priv->clk_25m_mask;
+ val |= priv->clk_25m_reg;
+
+ return phy_write_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M, val);
+}
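A user-space sketch of the read-modify-write just performed, for, say, a 125 MHz PLL output: AT803X_CLK_OUT_MASK is GENMASK(4, 2) = 0x1c, and FIELD_PREP(0x1c, 6) places the selector at bit 2. The initial register value here is made up.

#include <stdio.h>
#include <stdint.h>

#define CLK_OUT_MASK	0x1cu	/* GENMASK(4, 2) */

int main(void)
{
	uint16_t reg = 0xffff;	/* pretend MMD7 register 0x8016 read back all ones */
	uint16_t sel = 6;	/* AT803X_CLK_OUT_125MHZ_PLL */

	reg &= ~CLK_OUT_MASK;
	reg |= (sel << 2) & CLK_OUT_MASK;	/* FIELD_PREP(CLK_OUT_MASK, sel) */

	printf("reg = 0x%04x\n", reg);	/* 0xfffb */
	return 0;
}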
+
+static int at8031_pll_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The default after hardware reset is PLL OFF. After a soft reset, the
+ * values are retained.
+ */
+ if (priv->flags & AT803X_KEEP_PLL_ENABLED)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_PLL_ON);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_PLL_ON, 0);
+}
+
static int at803x_config_init(struct phy_device *phydev)
{
int ret;
@@ -259,8 +544,20 @@ static int at803x_config_init(struct phy_device *phydev)
ret = at803x_enable_tx_delay(phydev);
else
ret = at803x_disable_tx_delay(phydev);
+ if (ret < 0)
+ return ret;
- return ret;
+ ret = at803x_clk_out_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) {
+ ret = at8031_pll_config(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
}
static int at803x_ack_interrupt(struct phy_device *phydev)
@@ -409,10 +706,11 @@ static int at803x_read_status(struct phy_device *phydev)
static struct phy_driver at803x_driver[] = {
{
- /* ATHEROS 8035 */
+ /* Qualcomm Atheros AR8035 */
.phy_id = ATH8035_PHY_ID,
- .name = "Atheros 8035 ethernet",
+ .name = "Qualcomm Atheros AR8035",
.phy_id_mask = AT803X_PHY_ID_MASK,
+ .probe = at803x_probe,
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
@@ -423,10 +721,11 @@ static struct phy_driver at803x_driver[] = {
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
- /* ATHEROS 8030 */
+ /* Qualcomm Atheros AR8030 */
.phy_id = ATH8030_PHY_ID,
- .name = "Atheros 8030 ethernet",
+ .name = "Qualcomm Atheros AR8030",
.phy_id_mask = AT803X_PHY_ID_MASK,
+ .probe = at803x_probe,
.config_init = at803x_config_init,
.link_change_notify = at803x_link_change_notify,
.set_wol = at803x_set_wol,
@@ -437,10 +736,11 @@ static struct phy_driver at803x_driver[] = {
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
- /* ATHEROS 8031 */
+ /* Qualcomm Atheros AR8031/AR8033 */
.phy_id = ATH8031_PHY_ID,
- .name = "Atheros 8031 ethernet",
+ .name = "Qualcomm Atheros AR8031/AR8033",
.phy_id_mask = AT803X_PHY_ID_MASK,
+ .probe = at803x_probe,
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
@@ -454,8 +754,7 @@ static struct phy_driver at803x_driver[] = {
}, {
/* ATHEROS AR9331 */
PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
- .name = "Atheros AR9331 built-in PHY",
- .config_init = at803x_config_init,
+ .name = "Qualcomm Atheros AR9331 built-in PHY",
.suspend = at803x_suspend,
.resume = at803x_resume,
/* PHY_BASIC_FEATURES */
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 6580094161a9..8f241b57fcf6 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -469,6 +469,19 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests to enable time stamping on both edges. */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
index = rq->extts.index;
if (index >= N_EXT_TS)
return -EINVAL;
@@ -491,6 +504,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
if (rq->perout.index >= N_PER_OUT)
return -EINVAL;
return periodic_output(clock, rq, on, rq->perout.index);
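The two dp83640 checks above follow the generic PTP flag-validation recipe introduced alongside PTP_STRICT_FLAGS. A minimal sketch for a hypothetical single-edge-only timestamper (the supported flag set is an assumption, not dp83640's):

#include <linux/errno.h>
#include <linux/ptp_clock_kernel.h>

static int example_extts_enable(struct ptp_clock_request *rq)
{
	/* Reject flags the hardware cannot honour */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Under PTP_STRICT_FLAGS, a both-edges request must be refused */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	return 0;	/* flags acceptable; arm the hardware here */
}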
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
new file mode 100644
index 000000000000..1c7a7c57dec3
--- /dev/null
+++ b/drivers/net/phy/dp83869.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Driver for the Texas Instruments DP83869 PHY
+ * Copyright (C) 2019 Texas Instruments Inc.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/delay.h>
+
+#include <dt-bindings/net/ti-dp83869.h>
+
+#define DP83869_PHY_ID 0x2000a0f1
+#define DP83869_DEVADDR 0x1f
+
+#define MII_DP83869_PHYCTRL 0x10
+#define MII_DP83869_MICR 0x12
+#define MII_DP83869_ISR 0x13
+#define DP83869_CTRL 0x1f
+#define DP83869_CFG4 0x1e
+
+/* Extended Registers */
+#define DP83869_GEN_CFG3 0x0031
+#define DP83869_RGMIICTL 0x0032
+#define DP83869_STRAP_STS1 0x006e
+#define DP83869_RGMIIDCTL 0x0086
+#define DP83869_IO_MUX_CFG 0x0170
+#define DP83869_OP_MODE 0x01df
+#define DP83869_FX_CTRL 0x0c00
+
+#define DP83869_SW_RESET BIT(15)
+#define DP83869_SW_RESTART BIT(14)
+
+/* MICR Interrupt bits */
+#define MII_DP83869_MICR_AN_ERR_INT_EN BIT(15)
+#define MII_DP83869_MICR_SPEED_CHNG_INT_EN BIT(14)
+#define MII_DP83869_MICR_DUP_MODE_CHNG_INT_EN BIT(13)
+#define MII_DP83869_MICR_PAGE_RXD_INT_EN BIT(12)
+#define MII_DP83869_MICR_AUTONEG_COMP_INT_EN BIT(11)
+#define MII_DP83869_MICR_LINK_STS_CHNG_INT_EN BIT(10)
+#define MII_DP83869_MICR_FALSE_CARRIER_INT_EN BIT(8)
+#define MII_DP83869_MICR_SLEEP_MODE_CHNG_INT_EN BIT(4)
+#define MII_DP83869_MICR_WOL_INT_EN BIT(3)
+#define MII_DP83869_MICR_XGMII_ERR_INT_EN BIT(2)
+#define MII_DP83869_MICR_POL_CHNG_INT_EN BIT(1)
+#define MII_DP83869_MICR_JABBER_INT_EN BIT(0)
+
+#define MII_DP83869_BMCR_DEFAULT (BMCR_ANENABLE | \
+ BMCR_FULLDPLX | \
+ BMCR_SPEED1000)
+
+/* FX_CTRL uses the same bit layout as the BMCR, so reuse the BMCR default */
+#define DP83869_FX_CTRL_DEFAULT MII_DP83869_BMCR_DEFAULT
+
+/* CFG1 bits */
+#define DP83869_CFG1_DEFAULT (ADVERTISE_1000HALF | \
+ ADVERTISE_1000FULL | \
+ CTL1000_AS_MASTER)
+
+/* RGMIICTL bits */
+#define DP83869_RGMII_TX_CLK_DELAY_EN BIT(1)
+#define DP83869_RGMII_RX_CLK_DELAY_EN BIT(0)
+
+/* STRAP_STS1 bits */
+#define DP83869_STRAP_STS1_RESERVED BIT(11)
+
+/* PHYCTRL bits */
+#define DP83869_RX_FIFO_SHIFT 12
+#define DP83869_TX_FIFO_SHIFT 14
+
+/* The lower byte of PHY_CTRL, 0x48, is declared as reserved */
+#define DP83869_PHY_CTRL_DEFAULT 0x48
+#define DP83869_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 12)
+#define DP83869_PHYCR_RESERVED_MASK BIT(11)
+
+/* RGMIIDCTL bits */
+#define DP83869_RGMII_TX_CLK_DELAY_SHIFT 4
+
+/* IO_MUX_CFG bits */
+#define DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL 0x1f
+
+#define DP83869_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0
+#define DP83869_IO_MUX_CFG_IO_IMPEDANCE_MIN 0x1f
+#define DP83869_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8)
+#define DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT 8
+
+/* CFG3 bits */
+#define DP83869_CFG3_PORT_MIRROR_EN BIT(0)
+
+/* CFG4 bits */
+#define DP83869_INT_OE BIT(7)
+
+/* OP MODE */
+#define DP83869_OP_MODE_MII BIT(5)
+#define DP83869_SGMII_RGMII_BRIDGE BIT(6)
+
+enum {
+ DP83869_PORT_MIRRORING_KEEP,
+ DP83869_PORT_MIRRORING_EN,
+ DP83869_PORT_MIRRORING_DIS,
+};
+
+struct dp83869_private {
+ int tx_fifo_depth;
+ int rx_fifo_depth;
+ int io_impedance;
+ int port_mirroring;
+ bool rxctrl_strap_quirk;
+ int clk_output_sel;
+ int mode;
+};
+
+static int dp83869_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, MII_DP83869_ISR);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int dp83869_config_intr(struct phy_device *phydev)
+{
+ int micr_status = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ micr_status = phy_read(phydev, MII_DP83869_MICR);
+ if (micr_status < 0)
+ return micr_status;
+
+ micr_status |=
+ (MII_DP83869_MICR_AN_ERR_INT_EN |
+ MII_DP83869_MICR_SPEED_CHNG_INT_EN |
+ MII_DP83869_MICR_AUTONEG_COMP_INT_EN |
+ MII_DP83869_MICR_LINK_STS_CHNG_INT_EN |
+ MII_DP83869_MICR_DUP_MODE_CHNG_INT_EN |
+ MII_DP83869_MICR_SLEEP_MODE_CHNG_INT_EN);
+
+ return phy_write(phydev, MII_DP83869_MICR, micr_status);
+ }
+
+ return phy_write(phydev, MII_DP83869_MICR, micr_status);
+}
+
+static int dp83869_config_port_mirroring(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869 = phydev->priv;
+
+ if (dp83869->port_mirroring == DP83869_PORT_MIRRORING_EN)
+ phy_set_bits_mmd(phydev, DP83869_DEVADDR, DP83869_GEN_CFG3,
+ DP83869_CFG3_PORT_MIRROR_EN);
+ else
+ phy_clear_bits_mmd(phydev, DP83869_DEVADDR, DP83869_GEN_CFG3,
+ DP83869_CFG3_PORT_MIRROR_EN);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF_MDIO
+static int dp83869_of_init(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+ int ret;
+
+ if (!of_node)
+ return -ENODEV;
+
+ dp83869->io_impedance = -EINVAL;
+
+ /* Optional configuration */
+ ret = of_property_read_u32(of_node, "ti,clk-output-sel",
+ &dp83869->clk_output_sel);
+ if (ret || dp83869->clk_output_sel > DP83869_CLK_O_SEL_REF_CLK)
+ dp83869->clk_output_sel = DP83869_CLK_O_SEL_REF_CLK;
+
+ ret = of_property_read_u32(of_node, "ti,op-mode", &dp83869->mode);
+ if (ret == 0) {
+ if (dp83869->mode < DP83869_RGMII_COPPER_ETHERNET ||
+ dp83869->mode > DP83869_SGMII_COPPER_ETHERNET)
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(of_node, "ti,max-output-impedance"))
+ dp83869->io_impedance = DP83869_IO_MUX_CFG_IO_IMPEDANCE_MAX;
+ else if (of_property_read_bool(of_node, "ti,min-output-impedance"))
+ dp83869->io_impedance = DP83869_IO_MUX_CFG_IO_IMPEDANCE_MIN;
+
+ if (of_property_read_bool(of_node, "enet-phy-lane-swap"))
+ dp83869->port_mirroring = DP83869_PORT_MIRRORING_EN;
+ else
+ dp83869->port_mirroring = DP83869_PORT_MIRRORING_DIS;
+
+ if (of_property_read_u32(of_node, "rx-fifo-depth",
+ &dp83869->rx_fifo_depth))
+ dp83869->rx_fifo_depth = DP83869_PHYCR_FIFO_DEPTH_4_B_NIB;
+
+ if (of_property_read_u32(of_node, "tx-fifo-depth",
+ &dp83869->tx_fifo_depth))
+ dp83869->tx_fifo_depth = DP83869_PHYCR_FIFO_DEPTH_4_B_NIB;
+
+ return 0;
+}
+#else
+static int dp83869_of_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int dp83869_configure_rgmii(struct phy_device *phydev,
+ struct dp83869_private *dp83869)
+{
+ int ret, val;
+
+ if (phy_interface_is_rgmii(phydev)) {
+ val = phy_read(phydev, MII_DP83869_PHYCTRL);
+ if (val < 0)
+ return val;
+
+ val &= ~DP83869_PHYCR_FIFO_DEPTH_MASK;
+ val |= (dp83869->tx_fifo_depth << DP83869_TX_FIFO_SHIFT);
+ val |= (dp83869->rx_fifo_depth << DP83869_RX_FIFO_SHIFT);
+
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL, val);
+ if (ret)
+ return ret;
+ }
+
+ if (dp83869->io_impedance >= 0)
+ phy_modify_mmd(phydev, DP83869_DEVADDR,
+ DP83869_IO_MUX_CFG,
+ DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
+ dp83869->io_impedance &
+ DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
+
+ return 0;
+}
+
+static int dp83869_configure_mode(struct phy_device *phydev,
+ struct dp83869_private *dp83869)
+{
+ int phy_ctrl_val;
+ int ret;
+
+ if (dp83869->mode < DP83869_RGMII_COPPER_ETHERNET ||
+ dp83869->mode > DP83869_SGMII_COPPER_ETHERNET)
+ return -EINVAL;
+
+	/* The init sequence below for each operational mode is defined in
+ * section 9.4.8 of the datasheet.
+ */
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_OP_MODE,
+ dp83869->mode);
+ if (ret)
+ return ret;
+
+ ret = phy_write(phydev, MII_BMCR, MII_DP83869_BMCR_DEFAULT);
+ if (ret)
+ return ret;
+
+ phy_ctrl_val = (dp83869->rx_fifo_depth << DP83869_RX_FIFO_SHIFT |
+ dp83869->tx_fifo_depth << DP83869_TX_FIFO_SHIFT |
+ DP83869_PHY_CTRL_DEFAULT);
+
+ switch (dp83869->mode) {
+ case DP83869_RGMII_COPPER_ETHERNET:
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL,
+ phy_ctrl_val);
+ if (ret)
+ return ret;
+
+ ret = phy_write(phydev, MII_CTRL1000, DP83869_CFG1_DEFAULT);
+ if (ret)
+ return ret;
+
+ ret = dp83869_configure_rgmii(phydev, dp83869);
+ if (ret)
+ return ret;
+ break;
+ case DP83869_RGMII_SGMII_BRIDGE:
+ phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_OP_MODE,
+ DP83869_SGMII_RGMII_BRIDGE,
+ DP83869_SGMII_RGMII_BRIDGE);
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_FX_CTRL, DP83869_FX_CTRL_DEFAULT);
+ if (ret)
+ return ret;
+
+ break;
+ case DP83869_1000M_MEDIA_CONVERT:
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL,
+ phy_ctrl_val);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_FX_CTRL, DP83869_FX_CTRL_DEFAULT);
+ if (ret)
+ return ret;
+ break;
+ case DP83869_100M_MEDIA_CONVERT:
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL,
+ phy_ctrl_val);
+ if (ret)
+ return ret;
+ break;
+ case DP83869_SGMII_COPPER_ETHERNET:
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL,
+ phy_ctrl_val);
+ if (ret)
+ return ret;
+
+ ret = phy_write(phydev, MII_CTRL1000, DP83869_CFG1_DEFAULT);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_FX_CTRL, DP83869_FX_CTRL_DEFAULT);
+ if (ret)
+ return ret;
+
+ break;
+ case DP83869_RGMII_1000_BASE:
+ case DP83869_RGMII_100_BASE:
+ break;
+ default:
+ return -EINVAL;
+	}
+
+ return 0;
+}
+
+static int dp83869_config_init(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869 = phydev->priv;
+ int ret, val;
+
+ ret = dp83869_configure_mode(phydev, dp83869);
+ if (ret)
+ return ret;
+
+ /* Enable Interrupt output INT_OE in CFG4 register */
+ if (phy_interrupt_is_valid(phydev)) {
+ val = phy_read(phydev, DP83869_CFG4);
+ val |= DP83869_INT_OE;
+ phy_write(phydev, DP83869_CFG4, val);
+ }
+
+ if (dp83869->port_mirroring != DP83869_PORT_MIRRORING_KEEP)
+ dp83869_config_port_mirroring(phydev);
+
+ /* Clock output selection if muxing property is set */
+ if (dp83869->clk_output_sel != DP83869_CLK_O_SEL_REF_CLK)
+ phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_IO_MUX_CFG,
+ DP83869_IO_MUX_CFG_CLK_O_SEL_MASK,
+ dp83869->clk_output_sel <<
+ DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT);
+
+ return 0;
+}
+
+static int dp83869_probe(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869;
+ int ret;
+
+ dp83869 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83869),
+ GFP_KERNEL);
+ if (!dp83869)
+ return -ENOMEM;
+
+ phydev->priv = dp83869;
+
+ ret = dp83869_of_init(phydev);
+ if (ret)
+ return ret;
+
+ return dp83869_config_init(phydev);
+}
+
+static int dp83869_phy_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write(phydev, DP83869_CTRL, DP83869_SW_RESET);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(10, 20);
+
+	/* A global SW reset restores all registers to their defaults, so
+	 * the PHY must be reconfigured afterwards.
+	 */
+ return dp83869_config_init(phydev);
+}
+
+static struct phy_driver dp83869_driver[] = {
+ {
+ PHY_ID_MATCH_MODEL(DP83869_PHY_ID),
+ .name = "TI DP83869",
+
+ .probe = dp83869_probe,
+ .config_init = dp83869_config_init,
+ .soft_reset = dp83869_phy_reset,
+
+ /* IRQ related */
+ .ack_interrupt = dp83869_ack_interrupt,
+ .config_intr = dp83869_config_intr,
+
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ },
+};
+module_phy_driver(dp83869_driver);
+
+static struct mdio_device_id __maybe_unused dp83869_tbl[] = {
+ { PHY_ID_MATCH_MODEL(DP83869_PHY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, dp83869_tbl);
+
+MODULE_DESCRIPTION("Texas Instruments DP83869 PHY driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 3b99882692e3..1bf13017d288 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -26,6 +26,7 @@
#include <linux/hwmon.h>
#include <linux/marvell_phy.h>
#include <linux/phy.h>
+#include <linux/sfp.h>
#define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe
#define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa)
@@ -206,6 +207,28 @@ static int mv3310_hwmon_probe(struct phy_device *phydev)
}
#endif
+static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ phy_interface_t iface;
+
+ sfp_parse_support(phydev->sfp_bus, id, support);
+ iface = sfp_select_interface(phydev->sfp_bus, id, support);
+
+ if (iface != PHY_INTERFACE_MODE_10GKR) {
+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct sfp_upstream_ops mv3310_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = mv3310_sfp_insert,
+};
+
static int mv3310_probe(struct phy_device *phydev)
{
struct mv3310_priv *priv;
@@ -236,7 +259,7 @@ static int mv3310_probe(struct phy_device *phydev)
if (ret)
return ret;
- return 0;
+ return phy_sfp_probe(phydev, &mv3310_sfp_ops);
}
static int mv3310_suspend(struct phy_device *phydev)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2e29ab841b4d..35876562e32a 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -64,11 +64,12 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev)
if (mdiodev->dev.of_node)
reset = devm_reset_control_get_exclusive(&mdiodev->dev,
"phy");
- if (PTR_ERR(reset) == -ENOENT ||
- PTR_ERR(reset) == -ENOTSUPP)
- reset = NULL;
- else if (IS_ERR(reset))
- return PTR_ERR(reset);
+ if (IS_ERR(reset)) {
+ if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOSYS)
+ reset = NULL;
+ else
+ return PTR_ERR(reset);
+ }
mdiodev->reset_ctrl = reset;
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 805cda3465d7..d5f8f351d9ef 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -252,13 +252,21 @@ enum rgmii_rx_clock_delay {
#define MSCC_PHY_TR_LSB 17
#define MSCC_PHY_TR_MSB 18
-/* Microsemi PHY ID's */
+/* Microsemi PHY IDs
+ * The code assumes the lowest nibble is 0
+ */
+#define PHY_ID_VSC8504 0x000704c0
#define PHY_ID_VSC8514 0x00070670
#define PHY_ID_VSC8530 0x00070560
#define PHY_ID_VSC8531 0x00070570
#define PHY_ID_VSC8540 0x00070760
#define PHY_ID_VSC8541 0x00070770
+#define PHY_ID_VSC8552 0x000704e0
+#define PHY_ID_VSC856X 0x000707e0
+#define PHY_ID_VSC8572 0x000704d0
#define PHY_ID_VSC8574 0x000704a0
+#define PHY_ID_VSC8575 0x000707d0
+#define PHY_ID_VSC8582 0x000707b0
#define PHY_ID_VSC8584 0x000707c0
#define MSCC_VDDMAC_1500 1500
@@ -1595,6 +1603,9 @@ static bool vsc8584_is_pkg_init(struct phy_device *phydev, bool reversed)
else
addr = vsc8531->base_addr + i;
+ if (!map[addr])
+ continue;
+
phy = container_of(map[addr], struct phy_device, mdio);
if ((phy->phy_id & phydev->drv->phy_id_mask) !=
@@ -1647,14 +1658,29 @@ static int vsc8584_config_init(struct phy_device *phydev)
* in this pre-init function.
*/
if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 1 : 0)) {
- if ((phydev->phy_id & phydev->drv->phy_id_mask) ==
- (PHY_ID_VSC8574 & phydev->drv->phy_id_mask))
+		/* The following switch statement assumes that the lowest
+		 * nibble of the phy_id_mask is always 0. This works because
+		 * the lowest nibble of the PHY_IDs below is also 0.
+		 */
+ WARN_ON(phydev->drv->phy_id_mask & 0xf);
+
+ switch (phydev->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_VSC8504:
+ case PHY_ID_VSC8552:
+ case PHY_ID_VSC8572:
+ case PHY_ID_VSC8574:
ret = vsc8574_config_pre_init(phydev);
- else if ((phydev->phy_id & phydev->drv->phy_id_mask) ==
- (PHY_ID_VSC8584 & phydev->drv->phy_id_mask))
+ break;
+ case PHY_ID_VSC856X:
+ case PHY_ID_VSC8575:
+ case PHY_ID_VSC8582:
+ case PHY_ID_VSC8584:
ret = vsc8584_config_pre_init(phydev);
- else
+ break;
+ default:
ret = -EINVAL;
+ break;
+ }
if (ret)
goto err;
@@ -2322,6 +2348,32 @@ static int vsc85xx_probe(struct phy_device *phydev)
/* Microsemi VSC85xx PHYs */
static struct phy_driver vsc85xx_driver[] = {
{
+ .phy_id = PHY_ID_VSC8504,
+ .name = "Microsemi GE VSC8504 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8514,
.name = "Microsemi GE VSC8514 SyncE",
.phy_id_mask = 0xfffffff0,
@@ -2445,6 +2497,82 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
},
{
+ .phy_id = PHY_ID_VSC8552,
+ .name = "Microsemi GE VSC8552 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
+ .phy_id = PHY_ID_VSC856X,
+ .name = "Microsemi GE VSC856X SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
+ .phy_id = PHY_ID_VSC8572,
+ .name = "Microsemi GE VSC8572 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8574,
.name = "Microsemi GE VSC8574 SyncE",
.phy_id_mask = 0xfffffff0,
@@ -2471,6 +2599,54 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
},
{
+ .phy_id = PHY_ID_VSC8575,
+ .name = "Microsemi GE VSC8575 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
+ .phy_id = PHY_ID_VSC8582,
+ .name = "Microsemi GE VSC8582 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8584,
.name = "Microsemi GE VSC8584 SyncE",
.phy_id_mask = 0xfffffff0,
@@ -2500,12 +2676,18 @@ static struct phy_driver vsc85xx_driver[] = {
module_phy_driver(vsc85xx_driver);
static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
+ { PHY_ID_VSC8504, 0xfffffff0, },
{ PHY_ID_VSC8514, 0xfffffff0, },
{ PHY_ID_VSC8530, 0xfffffff0, },
{ PHY_ID_VSC8531, 0xfffffff0, },
{ PHY_ID_VSC8540, 0xfffffff0, },
{ PHY_ID_VSC8541, 0xfffffff0, },
+ { PHY_ID_VSC8552, 0xfffffff0, },
+ { PHY_ID_VSC856X, 0xfffffff0, },
+ { PHY_ID_VSC8572, 0xfffffff0, },
{ PHY_ID_VSC8574, 0xfffffff0, },
+ { PHY_ID_VSC8575, 0xfffffff0, },
+ { PHY_ID_VSC8582, 0xfffffff0, },
{ PHY_ID_VSC8584, 0xfffffff0, },
{ }
};
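The WARN_ON in vsc8584_config_init() guards the invariant these new PHY_ID constants rely on: with phy_id_mask 0xfffffff0 the silicon revision nibble is masked off, so the masked ID compares equal to a table constant only because that constant's low nibble is also 0. A one-function illustration (the helper name is made up; the ID value is PHY_ID_VSC8574 from above):

#include <linux/phy.h>

#define EXAMPLE_PHY_ID_VSC8574	0x000704a0	/* from the table above */

static bool example_is_vsc8574(struct phy_device *phydev)
{
	/* Mask 0xfffffff0 drops the revision nibble; the comparison
	 * works because the constant's low nibble is 0.
	 */
	return (phydev->phy_id & 0xfffffff0) == EXAMPLE_PHY_ID_VSC8574;
}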
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 105d389b58e7..36d4ffe1cd3f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -23,6 +23,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
+#include <linux/sfp.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/io.h>
@@ -841,6 +842,9 @@ void phy_stop(struct phy_device *phydev)
mutex_lock(&phydev->lock);
+ if (phydev->sfp_bus)
+ sfp_upstream_stop(phydev->sfp_bus);
+
phydev->state = PHY_HALTED;
mutex_unlock(&phydev->lock);
@@ -875,6 +879,9 @@ void phy_start(struct phy_device *phydev)
goto out;
}
+ if (phydev->sfp_bus)
+ sfp_upstream_start(phydev->sfp_bus);
+
/* if phy was suspended, bring the physical link up again */
__phy_resume(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index fa71998fea51..990f22eb6ce7 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -27,6 +27,7 @@
#include <linux/bitmap.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
+#include <linux/sfp.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
@@ -488,7 +489,7 @@ static int phy_bus_match(struct device *dev, struct device_driver *drv)
if (phydev->is_c45) {
for (i = 1; i < num_ids; i++) {
- if (!(phydev->c45_ids.devices_in_package & (1 << i)))
+ if (phydev->c45_ids.device_ids[i] == 0xffffffff)
continue;
if ((phydrv->phy_id & phydrv->phy_id_mask) ==
@@ -632,7 +633,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
int i;
for (i = 1; i < num_ids; i++) {
- if (!(c45_ids->devices_in_package & (1 << i)))
+ if (c45_ids->device_ids[i] == 0xffffffff)
continue;
ret = phy_request_driver_module(dev,
@@ -812,10 +813,13 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
*/
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
{
- struct phy_c45_device_ids c45_ids = {0};
+ struct phy_c45_device_ids c45_ids;
u32 phy_id = 0;
int r;
+ c45_ids.devices_in_package = 0;
+ memset(c45_ids.device_ids, 0xff, sizeof(c45_ids.device_ids));
+
r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
if (r)
return ERR_PTR(r);
@@ -1175,6 +1179,65 @@ phy_standalone_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(phy_standalone);
/**
+ * phy_sfp_attach - attach the SFP bus to the PHY upstream network device
+ * @upstream: pointer to the phy device
+ * @bus: sfp bus representing cage being attached
+ *
+ * This is used to fill in the sfp_upstream_ops .attach member.
+ */
+void phy_sfp_attach(void *upstream, struct sfp_bus *bus)
+{
+ struct phy_device *phydev = upstream;
+
+ if (phydev->attached_dev)
+ phydev->attached_dev->sfp_bus = bus;
+ phydev->sfp_bus_attached = true;
+}
+EXPORT_SYMBOL(phy_sfp_attach);
+
+/**
+ * phy_sfp_detach - detach the SFP bus from the PHY upstream network device
+ * @upstream: pointer to the phy device
+ * @bus: sfp bus representing cage being detached
+ *
+ * This is used to fill in the sfp_upstream_ops .detach member.
+ */
+void phy_sfp_detach(void *upstream, struct sfp_bus *bus)
+{
+ struct phy_device *phydev = upstream;
+
+ if (phydev->attached_dev)
+ phydev->attached_dev->sfp_bus = NULL;
+ phydev->sfp_bus_attached = false;
+}
+EXPORT_SYMBOL(phy_sfp_detach);
+
+/**
+ * phy_sfp_probe - probe for an SFP cage attached to this PHY device
+ * @phydev: Pointer to phy_device
+ * @ops: SFP's upstream operations
+ */
+int phy_sfp_probe(struct phy_device *phydev,
+ const struct sfp_upstream_ops *ops)
+{
+ struct sfp_bus *bus;
+ int ret;
+
+ if (phydev->mdio.dev.fwnode) {
+ bus = sfp_bus_find_fwnode(phydev->mdio.dev.fwnode);
+ if (IS_ERR(bus))
+ return PTR_ERR(bus);
+
+ phydev->sfp_bus = bus;
+
+ ret = sfp_bus_add_upstream(bus, phydev, ops);
+ sfp_bus_put(bus);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(phy_sfp_probe);
+
+/**
* phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
* @phydev: Pointer to phy_device to attach
@@ -1249,6 +1312,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (dev) {
phydev->attached_dev = dev;
dev->phydev = phydev;
+
+ if (phydev->sfp_bus_attached)
+ dev->sfp_bus = phydev->sfp_bus;
}
/* Some Ethernet drivers try to connect to a PHY device before
@@ -2418,6 +2484,9 @@ static int phy_remove(struct device *dev)
phydev->state = PHY_DOWN;
mutex_unlock(&phydev->lock);
+ sfp_bus_del_upstream(phydev->sfp_bus);
+ phydev->sfp_bus = NULL;
+
if (phydev->drv && phydev->drv->remove) {
phydev->drv->remove(phydev);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index f16d9e92a81a..6ce2e6c63e75 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -567,7 +567,7 @@ static int phylink_register_sfp(struct phylink *pl,
struct sfp_bus *bus;
int ret;
- bus = sfp_register_upstream_node(fwnode, pl, &sfp_phylink_ops);
+ bus = sfp_bus_find_fwnode(fwnode);
if (IS_ERR(bus)) {
ret = PTR_ERR(bus);
phylink_err(pl, "unable to attach SFP bus: %d\n", ret);
@@ -576,7 +576,10 @@ static int phylink_register_sfp(struct phylink *pl,
pl->sfp_bus = bus;
- return 0;
+ ret = sfp_bus_add_upstream(bus, pl, &sfp_phylink_ops);
+ sfp_bus_put(bus);
+
+ return ret;
}
/**
@@ -670,8 +673,7 @@ EXPORT_SYMBOL_GPL(phylink_create);
*/
void phylink_destroy(struct phylink *pl)
{
- if (pl->sfp_bus)
- sfp_unregister_upstream(pl->sfp_bus);
+ sfp_bus_del_upstream(pl->sfp_bus);
if (pl->link_gpio)
gpiod_put(pl->link_gpio);
@@ -711,11 +713,6 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
int ret;
- memset(&config, 0, sizeof(config));
- linkmode_copy(supported, phy->supported);
- linkmode_copy(config.advertising, phy->advertising);
- config.interface = pl->link_config.interface;
-
/*
* This is the new way of dealing with flow control for PHYs,
* as described by Timur Tabi in commit 529ed1275263 ("net: phy:
@@ -723,10 +720,12 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
* using our validate call to the MAC, we rely upon the MAC
* clearing the bits from both supported and advertising fields.
*/
- if (phylink_test(supported, Pause))
- phylink_set(config.advertising, Pause);
- if (phylink_test(supported, Asym_Pause))
- phylink_set(config.advertising, Asym_Pause);
+ phy_support_asym_pause(phy);
+
+ memset(&config, 0, sizeof(config));
+ linkmode_copy(supported, phy->supported);
+ linkmode_copy(config.advertising, phy->advertising);
+ config.interface = pl->link_config.interface;
ret = phylink_validate(pl, supported, &config);
if (ret)
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index d037aab6a71d..c5398a023440 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -329,10 +329,19 @@ static void sfp_bus_release(struct kref *kref)
kfree(bus);
}
-static void sfp_bus_put(struct sfp_bus *bus)
+/**
+ * sfp_bus_put() - put a reference on the &struct sfp_bus
+ * @bus: the &struct sfp_bus found via sfp_bus_find_fwnode()
+ *
+ * Put a reference on the &struct sfp_bus and free the underlying structure
+ * if this was the last reference.
+ */
+void sfp_bus_put(struct sfp_bus *bus)
{
- kref_put_mutex(&bus->kref, sfp_bus_release, &sfp_mutex);
+ if (bus)
+ kref_put_mutex(&bus->kref, sfp_bus_release, &sfp_mutex);
}
+EXPORT_SYMBOL_GPL(sfp_bus_put);
static int sfp_register_bus(struct sfp_bus *bus)
{
@@ -348,11 +357,11 @@ static int sfp_register_bus(struct sfp_bus *bus)
return ret;
}
}
+ bus->registered = true;
bus->socket_ops->attach(bus->sfp);
if (bus->started)
bus->socket_ops->start(bus->sfp);
bus->upstream_ops->attach(bus->upstream, bus);
- bus->registered = true;
return 0;
}
@@ -446,13 +455,12 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
}
/**
- * sfp_register_upstream_node() - parse and register the neighbouring device
+ * sfp_bus_find_fwnode() - parse and locate the SFP bus from fwnode
* @fwnode: firmware node for the parent device (MAC or PHY)
- * @upstream: the upstream private data
- * @ops: the upstream's &struct sfp_upstream_ops
*
- * Parse the parent device's firmware node for a SFP bus, and register the
- * SFP bus using sfp_register_upstream().
+ * Parse the parent device's firmware node for an SFP bus, and locate
+ * the sfp_bus structure, incrementing its reference count. This must
+ * be put via sfp_bus_put() when done.
*
* Returns: on success, a pointer to the sfp_bus structure,
* %NULL if no SFP is specified,
@@ -462,9 +470,7 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
* %-ENOMEM if we failed to allocate the bus.
* an error from the upstream's connect_phy() method.
*/
-struct sfp_bus *sfp_register_upstream_node(struct fwnode_handle *fwnode,
- void *upstream,
- const struct sfp_upstream_ops *ops)
+struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
{
struct fwnode_reference_args ref;
struct sfp_bus *bus;
@@ -482,7 +488,39 @@ struct sfp_bus *sfp_register_upstream_node(struct fwnode_handle *fwnode,
if (!bus)
return ERR_PTR(-ENOMEM);
+ return bus;
+}
+EXPORT_SYMBOL_GPL(sfp_bus_find_fwnode);
+
+/**
+ * sfp_bus_add_upstream() - parse and register the neighbouring device
+ * @bus: the &struct sfp_bus found via sfp_bus_find_fwnode()
+ * @upstream: the upstream private data
+ * @ops: the upstream's &struct sfp_upstream_ops
+ *
+ * Add upstream driver for the SFP bus, and if the bus is complete, register
+ * the SFP bus using sfp_register_upstream(). This takes a reference on the
+ * bus, so it is safe to put the bus after this call.
+ *
+ * Returns: 0 on success, or an error from the upstream's connect_phy()
+ * method.
+ */
+int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
+ const struct sfp_upstream_ops *ops)
+{
+ int ret;
+
+ /* If no bus, return success */
+ if (!bus)
+ return 0;
+
rtnl_lock();
+ kref_get(&bus->kref);
bus->upstream_ops = ops;
bus->upstream = upstream;
@@ -495,33 +533,33 @@ struct sfp_bus *sfp_register_upstream_node(struct fwnode_handle *fwnode,
}
rtnl_unlock();
- if (ret) {
+ if (ret)
sfp_bus_put(bus);
- bus = ERR_PTR(ret);
- }
- return bus;
+ return ret;
}
-EXPORT_SYMBOL_GPL(sfp_register_upstream_node);
+EXPORT_SYMBOL_GPL(sfp_bus_add_upstream);
/**
- * sfp_unregister_upstream() - Unregister sfp bus
+ * sfp_bus_del_upstream() - Delete an SFP bus
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
*
- * Unregister a previously registered upstream connection for the SFP
- * module. @bus is returned from sfp_register_upstream().
+ * Delete a previously registered upstream connection for the SFP
+ * module. @bus should have been added by sfp_bus_add_upstream().
*/
-void sfp_unregister_upstream(struct sfp_bus *bus)
+void sfp_bus_del_upstream(struct sfp_bus *bus)
{
- rtnl_lock();
- if (bus->sfp)
- sfp_unregister_bus(bus);
- sfp_upstream_clear(bus);
- rtnl_unlock();
+ if (bus) {
+ rtnl_lock();
+ if (bus->sfp)
+ sfp_unregister_bus(bus);
+ sfp_upstream_clear(bus);
+ rtnl_unlock();
- sfp_bus_put(bus);
+ sfp_bus_put(bus);
+ }
}
-EXPORT_SYMBOL_GPL(sfp_unregister_upstream);
+EXPORT_SYMBOL_GPL(sfp_bus_del_upstream);
/* Socket driver entry points */
int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev)
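The split of sfp_register_upstream_node() into find/add leaves the caller holding only the lookup reference, which it drops once the upstream has taken its own. A sketch of the resulting lifetime pattern for a hypothetical upstream driver (phylink's conversion above follows the same shape); the helper name and 'ops' are illustrative:

#include <linux/err.h>
#include <linux/sfp.h>

static int example_attach_sfp(struct fwnode_handle *fwnode, void *upstream,
			      const struct sfp_upstream_ops *ops,
			      struct sfp_bus **out)
{
	struct sfp_bus *bus;
	int ret;

	bus = sfp_bus_find_fwnode(fwnode);	/* takes a reference */
	if (IS_ERR(bus))
		return PTR_ERR(bus);

	ret = sfp_bus_add_upstream(bus, upstream, ops);
	sfp_bus_put(bus);	/* add_upstream holds its own reference */
	if (ret)
		return ret;

	*out = bus;	/* released later via sfp_bus_del_upstream() */
	return 0;
}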
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 272d5773573e..b0f88c2c0153 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -36,6 +36,8 @@ enum {
SFP_E_INSERT = 0,
SFP_E_REMOVE,
+ SFP_E_DEV_ATTACH,
+ SFP_E_DEV_DETACH,
SFP_E_DEV_DOWN,
SFP_E_DEV_UP,
SFP_E_TX_FAULT,
@@ -45,16 +47,21 @@ enum {
SFP_E_TIMEOUT,
SFP_MOD_EMPTY = 0,
+ SFP_MOD_ERROR,
SFP_MOD_PROBE,
+ SFP_MOD_WAITDEV,
SFP_MOD_HPOWER,
+ SFP_MOD_WAITPWR,
SFP_MOD_PRESENT,
- SFP_MOD_ERROR,
- SFP_DEV_DOWN = 0,
+ SFP_DEV_DETACHED = 0,
+ SFP_DEV_DOWN,
SFP_DEV_UP,
SFP_S_DOWN = 0,
+ SFP_S_WAIT,
SFP_S_INIT,
+ SFP_S_INIT_TX_FAULT,
SFP_S_WAIT_LOS,
SFP_S_LINK_UP,
SFP_S_TX_FAULT,
@@ -64,10 +71,12 @@ enum {
static const char * const mod_state_strings[] = {
[SFP_MOD_EMPTY] = "empty",
+ [SFP_MOD_ERROR] = "error",
[SFP_MOD_PROBE] = "probe",
+ [SFP_MOD_WAITDEV] = "waitdev",
[SFP_MOD_HPOWER] = "hpower",
+ [SFP_MOD_WAITPWR] = "waitpwr",
[SFP_MOD_PRESENT] = "present",
- [SFP_MOD_ERROR] = "error",
};
static const char *mod_state_to_str(unsigned short mod_state)
@@ -78,6 +87,7 @@ static const char *mod_state_to_str(unsigned short mod_state)
}
static const char * const dev_state_strings[] = {
+ [SFP_DEV_DETACHED] = "detached",
[SFP_DEV_DOWN] = "down",
[SFP_DEV_UP] = "up",
};
@@ -92,6 +102,8 @@ static const char *dev_state_to_str(unsigned short dev_state)
static const char * const event_strings[] = {
[SFP_E_INSERT] = "insert",
[SFP_E_REMOVE] = "remove",
+ [SFP_E_DEV_ATTACH] = "dev_attach",
+ [SFP_E_DEV_DETACH] = "dev_detach",
[SFP_E_DEV_DOWN] = "dev_down",
[SFP_E_DEV_UP] = "dev_up",
[SFP_E_TX_FAULT] = "tx_fault",
@@ -110,7 +122,9 @@ static const char *event_to_str(unsigned short event)
static const char * const sm_state_strings[] = {
[SFP_S_DOWN] = "down",
+ [SFP_S_WAIT] = "wait",
[SFP_S_INIT] = "init",
+ [SFP_S_INIT_TX_FAULT] = "init_tx_fault",
[SFP_S_WAIT_LOS] = "wait_los",
[SFP_S_LINK_UP] = "link_up",
[SFP_S_TX_FAULT] = "tx_fault",
@@ -141,6 +155,7 @@ static const enum gpiod_flags gpio_flags[] = {
GPIOD_ASIS,
};
+#define T_WAIT msecs_to_jiffies(50)
#define T_INIT_JIFFIES msecs_to_jiffies(300)
#define T_RESET_US 10
#define T_FAULT_RECOVER msecs_to_jiffies(1000)
@@ -149,22 +164,21 @@ static const enum gpiod_flags gpio_flags[] = {
* the same length on the PCB, which means it's possible for MOD DEF 0 to
* connect before the I2C bus on MOD DEF 1/2.
*
- * The SFP MSA specifies 300ms as t_init (the time taken for TX_FAULT to
- * be deasserted) but makes no mention of the earliest time before we can
- * access the I2C EEPROM. However, Avago modules require 300ms.
+ * The SFF-8472 specifies t_serial ("Time from power on until module is
+ * ready for data transmission over the two wire serial bus.") as 300ms.
*/
-#define T_PROBE_INIT msecs_to_jiffies(300)
-#define T_HPOWER_LEVEL msecs_to_jiffies(300)
-#define T_PROBE_RETRY msecs_to_jiffies(100)
+#define T_SERIAL msecs_to_jiffies(300)
+#define T_HPOWER_LEVEL msecs_to_jiffies(300)
+#define T_PROBE_RETRY_INIT msecs_to_jiffies(100)
+#define R_PROBE_RETRY_INIT 10
+#define T_PROBE_RETRY_SLOW msecs_to_jiffies(5000)
+#define R_PROBE_RETRY_SLOW 12
/* SFP modules appear to always have their PHY configured for bus address
* 0x56 (which with mdio-i2c, translates to a PHY address of 22).
*/
#define SFP_PHY_ADDR 22
-/* Give this long for the PHY to reset. */
-#define T_PHY_RESET_MS 50
-
struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);
@@ -187,20 +201,25 @@ struct sfp {
struct gpio_desc *gpio[GPIO_MAX];
int gpio_irq[GPIO_MAX];
- bool attached;
struct mutex st_mutex; /* Protects state */
unsigned int state;
struct delayed_work poll;
struct delayed_work timeout;
struct mutex sm_mutex; /* Protects state machine */
unsigned char sm_mod_state;
+ unsigned char sm_mod_tries_init;
+ unsigned char sm_mod_tries;
unsigned char sm_dev_state;
unsigned short sm_state;
unsigned int sm_retries;
struct sfp_eeprom_id id;
+ unsigned int module_power_mW;
+
#if IS_ENABLED(CONFIG_HWMON)
struct sfp_diag diag;
+ struct delayed_work hwmon_probe;
+ unsigned int hwmon_tries;
struct device *hwmon_dev;
char *hwmon_name;
#endif
@@ -1142,29 +1161,27 @@ static const struct hwmon_chip_info sfp_hwmon_chip_info = {
.info = sfp_hwmon_info,
};
-static int sfp_hwmon_insert(struct sfp *sfp)
+static void sfp_hwmon_probe(struct work_struct *work)
{
+ struct sfp *sfp = container_of(work, struct sfp, hwmon_probe.work);
int err, i;
- if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE)
- return 0;
-
- if (!(sfp->id.ext.diagmon & SFP_DIAGMON_DDM))
- return 0;
-
- if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
- /* This driver in general does not support address
- * change.
- */
- return 0;
-
err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag));
- if (err < 0)
- return err;
+ if (err < 0) {
+ if (sfp->hwmon_tries--) {
+ mod_delayed_work(system_wq, &sfp->hwmon_probe,
+ T_PROBE_RETRY_SLOW);
+ } else {
+ dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
+ }
+ return;
+ }
sfp->hwmon_name = kstrdup(dev_name(sfp->dev), GFP_KERNEL);
- if (!sfp->hwmon_name)
- return -ENODEV;
+ if (!sfp->hwmon_name) {
+ dev_err(sfp->dev, "out of memory for hwmon name\n");
+ return;
+ }
for (i = 0; sfp->hwmon_name[i]; i++)
if (hwmon_is_bad_char(sfp->hwmon_name[i]))
@@ -1174,18 +1191,52 @@ static int sfp_hwmon_insert(struct sfp *sfp)
sfp->hwmon_name, sfp,
&sfp_hwmon_chip_info,
NULL);
+ if (IS_ERR(sfp->hwmon_dev))
+ dev_err(sfp->dev, "failed to register hwmon device: %ld\n",
+ PTR_ERR(sfp->hwmon_dev));
+}
+
+static int sfp_hwmon_insert(struct sfp *sfp)
+{
+ if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE)
+ return 0;
+
+ if (!(sfp->id.ext.diagmon & SFP_DIAGMON_DDM))
+ return 0;
+
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
+ /* This driver in general does not support address
+ * change.
+ */
+ return 0;
- return PTR_ERR_OR_ZERO(sfp->hwmon_dev);
+ mod_delayed_work(system_wq, &sfp->hwmon_probe, 1);
+ sfp->hwmon_tries = R_PROBE_RETRY_SLOW;
+
+ return 0;
}
static void sfp_hwmon_remove(struct sfp *sfp)
{
+ cancel_delayed_work_sync(&sfp->hwmon_probe);
if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) {
hwmon_device_unregister(sfp->hwmon_dev);
sfp->hwmon_dev = NULL;
kfree(sfp->hwmon_name);
}
}
+
+static int sfp_hwmon_init(struct sfp *sfp)
+{
+ INIT_DELAYED_WORK(&sfp->hwmon_probe, sfp_hwmon_probe);
+
+ return 0;
+}
+
+static void sfp_hwmon_exit(struct sfp *sfp)
+{
+ cancel_delayed_work_sync(&sfp->hwmon_probe);
+}
#else
static int sfp_hwmon_insert(struct sfp *sfp)
{
@@ -1195,6 +1246,15 @@ static int sfp_hwmon_insert(struct sfp *sfp)
static void sfp_hwmon_remove(struct sfp *sfp)
{
}
+
+static int sfp_hwmon_init(struct sfp *sfp)
+{
+ return 0;
+}
+
+static void sfp_hwmon_exit(struct sfp *sfp)
+{
+}
#endif
/* Helpers */
@@ -1245,7 +1305,7 @@ static void sfp_sm_next(struct sfp *sfp, unsigned int state,
sfp_sm_set_timer(sfp, timeout);
}
-static void sfp_sm_ins_next(struct sfp *sfp, unsigned int state,
+static void sfp_sm_mod_next(struct sfp *sfp, unsigned int state,
unsigned int timeout)
{
sfp->sm_mod_state = state;
@@ -1266,8 +1326,6 @@ static void sfp_sm_probe_phy(struct sfp *sfp)
struct phy_device *phy;
int err;
- msleep(T_PHY_RESET_MS);
-
phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR);
if (phy == ERR_PTR(-ENODEV)) {
dev_info(sfp->dev, "no PHY detected\n");
@@ -1335,7 +1393,7 @@ static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
event == SFP_E_LOS_LOW);
}
-static void sfp_sm_fault(struct sfp *sfp, bool warn)
+static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
{
if (sfp->sm_retries && !--sfp->sm_retries) {
dev_err(sfp->dev,
@@ -1345,21 +1403,17 @@ static void sfp_sm_fault(struct sfp *sfp, bool warn)
if (warn)
dev_err(sfp->dev, "module transmit fault indicated\n");
- sfp_sm_next(sfp, SFP_S_TX_FAULT, T_FAULT_RECOVER);
+ sfp_sm_next(sfp, next_state, T_FAULT_RECOVER);
}
}
static void sfp_sm_mod_init(struct sfp *sfp)
{
sfp_module_tx_enable(sfp);
+}
- /* Wait t_init before indicating that the link is up, provided the
- * current state indicates no TX_FAULT. If TX_FAULT clears before
- * this time, that's fine too.
- */
- sfp_sm_next(sfp, SFP_S_INIT, T_INIT_JIFFIES);
- sfp->sm_retries = 5;
-
+static void sfp_sm_probe_for_phy(struct sfp *sfp)
+{
/* Setting the serdes link mode is guesswork: there's no
* field in the EEPROM which indicates what mode should
* be used.
@@ -1375,69 +1429,83 @@ static void sfp_sm_mod_init(struct sfp *sfp)
sfp_sm_probe_phy(sfp);
}
-static int sfp_sm_mod_hpower(struct sfp *sfp)
+static int sfp_module_parse_power(struct sfp *sfp)
{
- u32 power;
- u8 val;
- int err;
+ u32 power_mW = 1000;
- power = 1000;
if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_POWER_DECL))
- power = 1500;
+ power_mW = 1500;
if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_HIGH_POWER_LEVEL))
- power = 2000;
-
- if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE &&
- (sfp->id.ext.diagmon & (SFP_DIAGMON_DDM | SFP_DIAGMON_ADDRMODE)) !=
- SFP_DIAGMON_DDM) {
- /* The module appears not to implement bus address 0xa2,
- * or requires an address change sequence, so assume that
- * the module powers up in the indicated power mode.
- */
- if (power > sfp->max_power_mW) {
+ power_mW = 2000;
+
+ if (power_mW > sfp->max_power_mW) {
+ /* Module power specification exceeds the allowed maximum. */
+ if (sfp->id.ext.sff8472_compliance ==
+ SFP_SFF8472_COMPLIANCE_NONE &&
+ !(sfp->id.ext.diagmon & SFP_DIAGMON_DDM)) {
+ /* The module appears not to implement bus address
+ * 0xa2, so assume that the module powers up in the
+ * indicated mode.
+ */
dev_err(sfp->dev,
"Host does not support %u.%uW modules\n",
- power / 1000, (power / 100) % 10);
+ power_mW / 1000, (power_mW / 100) % 10);
return -EINVAL;
+ } else {
+ dev_warn(sfp->dev,
+ "Host does not support %u.%uW modules, module left in power mode 1\n",
+ power_mW / 1000, (power_mW / 100) % 10);
+ return 0;
}
- return 0;
}
- if (power > sfp->max_power_mW) {
+ /* If the module requires a higher power mode, but also requires
+ * an address change sequence, warn the user that the module may
+ * not be functional.
+ */
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE && power_mW > 1000) {
dev_warn(sfp->dev,
- "Host does not support %u.%uW modules, module left in power mode 1\n",
- power / 1000, (power / 100) % 10);
+ "Address Change Sequence not supported but module requires %u.%uW, module may not be functional\n",
+ power_mW / 1000, (power_mW / 100) % 10);
return 0;
}
- if (power <= 1000)
- return 0;
+ sfp->module_power_mW = power_mW;
+
+ return 0;
+}
+
+static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
+{
+ u8 val;
+ int err;
err = sfp_read(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
if (err != sizeof(val)) {
dev_err(sfp->dev, "Failed to read EEPROM: %d\n", err);
- err = -EAGAIN;
- goto err;
+ return -EAGAIN;
}
- val |= BIT(0);
+ if (enable)
+ val |= BIT(0);
+ else
+ val &= ~BIT(0);
err = sfp_write(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
if (err != sizeof(val)) {
dev_err(sfp->dev, "Failed to write EEPROM: %d\n", err);
- err = -EAGAIN;
- goto err;
+ return -EAGAIN;
}
- dev_info(sfp->dev, "Module switched to %u.%uW power level\n",
- power / 1000, (power / 100) % 10);
- return T_HPOWER_LEVEL;
+ if (enable)
+ dev_info(sfp->dev, "Module switched to %u.%uW power level\n",
+ sfp->module_power_mW / 1000,
+ (sfp->module_power_mW / 100) % 10);
-err:
- return err;
+ return 0;
}
-static int sfp_sm_mod_probe(struct sfp *sfp)
+static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
{
/* SFP module inserted - read I2C data */
struct sfp_eeprom_id id;
@@ -1447,7 +1515,8 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
ret = sfp_read(sfp, false, 0, &id, sizeof(id));
if (ret < 0) {
- dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret);
+ if (report)
+ dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret);
return -EAGAIN;
}
@@ -1517,106 +1586,174 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
dev_warn(sfp->dev,
"module address swap to access page 0xA2 is not supported.\n");
- ret = sfp_hwmon_insert(sfp);
- if (ret < 0)
- return ret;
-
- ret = sfp_module_insert(sfp->sfp_bus, &sfp->id);
+ /* Parse the module power requirement */
+ ret = sfp_module_parse_power(sfp);
if (ret < 0)
return ret;
- return sfp_sm_mod_hpower(sfp);
+ return 0;
}
static void sfp_sm_mod_remove(struct sfp *sfp)
{
- sfp_module_remove(sfp->sfp_bus);
+ if (sfp->sm_mod_state > SFP_MOD_WAITDEV)
+ sfp_module_remove(sfp->sfp_bus);
sfp_hwmon_remove(sfp);
- if (sfp->mod_phy)
- sfp_sm_phy_detach(sfp);
-
- sfp_module_tx_disable(sfp);
-
memset(&sfp->id, 0, sizeof(sfp->id));
+ sfp->module_power_mW = 0;
dev_info(sfp->dev, "module removed\n");
}
-static void sfp_sm_event(struct sfp *sfp, unsigned int event)
+/* This state machine tracks the upstream's state */
+static void sfp_sm_device(struct sfp *sfp, unsigned int event)
{
- mutex_lock(&sfp->sm_mutex);
+ switch (sfp->sm_dev_state) {
+ default:
+ if (event == SFP_E_DEV_ATTACH)
+ sfp->sm_dev_state = SFP_DEV_DOWN;
+ break;
- dev_dbg(sfp->dev, "SM: enter %s:%s:%s event %s\n",
- mod_state_to_str(sfp->sm_mod_state),
- dev_state_to_str(sfp->sm_dev_state),
- sm_state_to_str(sfp->sm_state),
- event_to_str(event));
+ case SFP_DEV_DOWN:
+ if (event == SFP_E_DEV_DETACH)
+ sfp->sm_dev_state = SFP_DEV_DETACHED;
+ else if (event == SFP_E_DEV_UP)
+ sfp->sm_dev_state = SFP_DEV_UP;
+ break;
+
+ case SFP_DEV_UP:
+ if (event == SFP_E_DEV_DETACH)
+ sfp->sm_dev_state = SFP_DEV_DETACHED;
+ else if (event == SFP_E_DEV_DOWN)
+ sfp->sm_dev_state = SFP_DEV_DOWN;
+ break;
+ }
+}
+
+/* This state machine tracks the insert/remove state of the module, probes
+ * the on-board EEPROM, and sets up the power level.
+ */
+static void sfp_sm_module(struct sfp *sfp, unsigned int event)
+{
+ int err;
+
+ /* Handle remove event globally, it resets this state machine */
+ if (event == SFP_E_REMOVE) {
+ if (sfp->sm_mod_state > SFP_MOD_PROBE)
+ sfp_sm_mod_remove(sfp);
+ sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0);
+ return;
+ }
+
+ /* Handle device detach globally */
+ if (sfp->sm_dev_state < SFP_DEV_DOWN &&
+ sfp->sm_mod_state > SFP_MOD_WAITDEV) {
+ if (sfp->module_power_mW > 1000 &&
+ sfp->sm_mod_state > SFP_MOD_HPOWER)
+ sfp_sm_mod_hpower(sfp, false);
+ sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0);
+ return;
+ }
- /* This state machine tracks the insert/remove state of
- * the module, and handles probing the on-board EEPROM.
- */
switch (sfp->sm_mod_state) {
default:
- if (event == SFP_E_INSERT && sfp->attached) {
- sfp_module_tx_disable(sfp);
- sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
+ if (event == SFP_E_INSERT) {
+ sfp_sm_mod_next(sfp, SFP_MOD_PROBE, T_SERIAL);
+ sfp->sm_mod_tries_init = R_PROBE_RETRY_INIT;
+ sfp->sm_mod_tries = R_PROBE_RETRY_SLOW;
}
break;
case SFP_MOD_PROBE:
- if (event == SFP_E_REMOVE) {
- sfp_sm_ins_next(sfp, SFP_MOD_EMPTY, 0);
- } else if (event == SFP_E_TIMEOUT) {
- int val = sfp_sm_mod_probe(sfp);
-
- if (val == 0)
- sfp_sm_ins_next(sfp, SFP_MOD_PRESENT, 0);
- else if (val > 0)
- sfp_sm_ins_next(sfp, SFP_MOD_HPOWER, val);
- else if (val != -EAGAIN)
- sfp_sm_ins_next(sfp, SFP_MOD_ERROR, 0);
- else
- sfp_sm_set_timer(sfp, T_PROBE_RETRY);
+		/* Wait for T_SERIAL to time out */
+ if (event != SFP_E_TIMEOUT)
+ break;
+
+ err = sfp_sm_mod_probe(sfp, sfp->sm_mod_tries == 1);
+ if (err == -EAGAIN) {
+ if (sfp->sm_mod_tries_init &&
+ --sfp->sm_mod_tries_init) {
+ sfp_sm_set_timer(sfp, T_PROBE_RETRY_INIT);
+ break;
+ } else if (sfp->sm_mod_tries && --sfp->sm_mod_tries) {
+ if (sfp->sm_mod_tries == R_PROBE_RETRY_SLOW - 1)
+ dev_warn(sfp->dev,
+ "please wait, module slow to respond\n");
+ sfp_sm_set_timer(sfp, T_PROBE_RETRY_SLOW);
+ break;
+ }
+ }
+ if (err < 0) {
+ sfp_sm_mod_next(sfp, SFP_MOD_ERROR, 0);
+ break;
}
- break;
- case SFP_MOD_HPOWER:
- if (event == SFP_E_TIMEOUT) {
- sfp_sm_ins_next(sfp, SFP_MOD_PRESENT, 0);
+ sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0);
+ /* fall through */
+ case SFP_MOD_WAITDEV:
+ /* Ensure that the device is attached before proceeding */
+ if (sfp->sm_dev_state < SFP_DEV_DOWN)
+ break;
+
+ /* Report the module insertion to the upstream device */
+ err = sfp_module_insert(sfp->sfp_bus, &sfp->id);
+ if (err < 0) {
+ sfp_sm_mod_next(sfp, SFP_MOD_ERROR, 0);
break;
}
- /* fallthrough */
- case SFP_MOD_PRESENT:
- case SFP_MOD_ERROR:
- if (event == SFP_E_REMOVE) {
- sfp_sm_mod_remove(sfp);
- sfp_sm_ins_next(sfp, SFP_MOD_EMPTY, 0);
+
+ /* If this is a power level 1 module, we are done */
+ if (sfp->module_power_mW <= 1000)
+ goto insert;
+
+ sfp_sm_mod_next(sfp, SFP_MOD_HPOWER, 0);
+ /* fall through */
+ case SFP_MOD_HPOWER:
+ /* Enable high power mode */
+ err = sfp_sm_mod_hpower(sfp, true);
+ if (err < 0) {
+ if (err != -EAGAIN) {
+ sfp_module_remove(sfp->sfp_bus);
+ sfp_sm_mod_next(sfp, SFP_MOD_ERROR, 0);
+ } else {
+ sfp_sm_set_timer(sfp, T_PROBE_RETRY_INIT);
+ }
+ break;
}
+
+ sfp_sm_mod_next(sfp, SFP_MOD_WAITPWR, T_HPOWER_LEVEL);
break;
- }
- /* This state machine tracks the netdev up/down state */
- switch (sfp->sm_dev_state) {
- default:
- if (event == SFP_E_DEV_UP)
- sfp->sm_dev_state = SFP_DEV_UP;
+ case SFP_MOD_WAITPWR:
+ /* Wait for T_HPOWER_LEVEL to time out */
+ if (event != SFP_E_TIMEOUT)
+ break;
+
+ insert:
+ sfp_sm_mod_next(sfp, SFP_MOD_PRESENT, 0);
break;
- case SFP_DEV_UP:
- if (event == SFP_E_DEV_DOWN) {
- /* If the module has a PHY, avoid raising TX disable
- * as this resets the PHY. Otherwise, raise it to
- * turn the laser off.
- */
- if (!sfp->mod_phy)
- sfp_module_tx_disable(sfp);
- sfp->sm_dev_state = SFP_DEV_DOWN;
- }
+ case SFP_MOD_PRESENT:
+ case SFP_MOD_ERROR:
break;
}
+#if IS_ENABLED(CONFIG_HWMON)
+ if (sfp->sm_mod_state >= SFP_MOD_WAITDEV &&
+ IS_ERR_OR_NULL(sfp->hwmon_dev)) {
+ err = sfp_hwmon_insert(sfp);
+ if (err)
+ dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
+ }
+#endif
+}
+
+static void sfp_sm_main(struct sfp *sfp, unsigned int event)
+{
+ unsigned long timeout;
+
/* Some events are global */
if (sfp->sm_state != SFP_S_DOWN &&
(sfp->sm_mod_state != SFP_MOD_PRESENT ||
@@ -1626,29 +1763,83 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
sfp_sm_link_down(sfp);
if (sfp->mod_phy)
sfp_sm_phy_detach(sfp);
+ sfp_module_tx_disable(sfp);
sfp_sm_next(sfp, SFP_S_DOWN, 0);
- mutex_unlock(&sfp->sm_mutex);
return;
}
/* The main state machine */
switch (sfp->sm_state) {
case SFP_S_DOWN:
- if (sfp->sm_mod_state == SFP_MOD_PRESENT &&
- sfp->sm_dev_state == SFP_DEV_UP)
- sfp_sm_mod_init(sfp);
+ if (sfp->sm_mod_state != SFP_MOD_PRESENT ||
+ sfp->sm_dev_state != SFP_DEV_UP)
+ break;
+
+ sfp_sm_mod_init(sfp);
+
+ /* Initialise the fault clearance retries */
+ sfp->sm_retries = 5;
+
+ /* We need to check the TX_FAULT state, which is not defined
+ * while TX_DISABLE is asserted. The earliest we want to do
+ * anything (such as probe for a PHY) is 50ms.
+ */
+ sfp_sm_next(sfp, SFP_S_WAIT, T_WAIT);
+ break;
+
+ case SFP_S_WAIT:
+ if (event != SFP_E_TIMEOUT)
+ break;
+
+ if (sfp->state & SFP_F_TX_FAULT) {
+ /* Wait t_init before indicating that the link is up,
+ * provided the current state indicates no TX_FAULT. If
+ * TX_FAULT clears before this time, that's fine too.
+ */
+ timeout = T_INIT_JIFFIES;
+ if (timeout > T_WAIT)
+ timeout -= T_WAIT;
+ else
+ timeout = 1;
+
+ sfp_sm_next(sfp, SFP_S_INIT, timeout);
+ } else {
+ /* TX_FAULT is not asserted, assume the module has
+ * finished initialising.
+ */
+ goto init_done;
+ }
break;
case SFP_S_INIT:
- if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT)
- sfp_sm_fault(sfp, true);
- else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR)
+ if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) {
+ /* TX_FAULT is still asserted after t_init, so assume
+ * there is a fault.
+ */
+ sfp_sm_fault(sfp, SFP_S_INIT_TX_FAULT,
+ sfp->sm_retries == 5);
+ } else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
+ init_done: /* TX_FAULT deasserted or we timed out with TX_FAULT
+ * clear. Probe for the PHY and check the LOS state.
+ */
+ sfp_sm_probe_for_phy(sfp);
sfp_sm_link_check_los(sfp);
+
+ /* Reset the fault retry count */
+ sfp->sm_retries = 5;
+ }
+ break;
+
+ case SFP_S_INIT_TX_FAULT:
+ if (event == SFP_E_TIMEOUT) {
+ sfp_module_tx_fault_reset(sfp);
+ sfp_sm_next(sfp, SFP_S_INIT, T_INIT_JIFFIES);
+ }
break;
case SFP_S_WAIT_LOS:
if (event == SFP_E_TX_FAULT)
- sfp_sm_fault(sfp, true);
+ sfp_sm_fault(sfp, SFP_S_TX_FAULT, true);
else if (sfp_los_event_inactive(sfp, event))
sfp_sm_link_up(sfp);
break;
@@ -1656,7 +1847,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
case SFP_S_LINK_UP:
if (event == SFP_E_TX_FAULT) {
sfp_sm_link_down(sfp);
- sfp_sm_fault(sfp, true);
+ sfp_sm_fault(sfp, SFP_S_TX_FAULT, true);
} else if (sfp_los_event_active(sfp, event)) {
sfp_sm_link_down(sfp);
sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@@ -1672,7 +1863,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
case SFP_S_REINIT:
if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) {
- sfp_sm_fault(sfp, false);
+ sfp_sm_fault(sfp, SFP_S_TX_FAULT, false);
} else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
dev_info(sfp->dev, "module transmit fault recovered\n");
sfp_sm_link_check_los(sfp);
@@ -1682,6 +1873,21 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
case SFP_S_TX_DISABLE:
break;
}
+}
+
+static void sfp_sm_event(struct sfp *sfp, unsigned int event)
+{
+ mutex_lock(&sfp->sm_mutex);
+
+ dev_dbg(sfp->dev, "SM: enter %s:%s:%s event %s\n",
+ mod_state_to_str(sfp->sm_mod_state),
+ dev_state_to_str(sfp->sm_dev_state),
+ sm_state_to_str(sfp->sm_state),
+ event_to_str(event));
+
+ sfp_sm_device(sfp, event);
+ sfp_sm_module(sfp, event);
+ sfp_sm_main(sfp, event);
dev_dbg(sfp->dev, "SM: exit %s:%s:%s\n",
mod_state_to_str(sfp->sm_mod_state),
@@ -1693,15 +1899,12 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
static void sfp_attach(struct sfp *sfp)
{
- sfp->attached = true;
- if (sfp->state & SFP_F_PRESENT)
- sfp_sm_event(sfp, SFP_E_INSERT);
+ sfp_sm_event(sfp, SFP_E_DEV_ATTACH);
}
static void sfp_detach(struct sfp *sfp)
{
- sfp->attached = false;
- sfp_sm_event(sfp, SFP_E_REMOVE);
+ sfp_sm_event(sfp, SFP_E_DEV_DETACH);
}
static void sfp_start(struct sfp *sfp)
@@ -1846,6 +2049,8 @@ static struct sfp *sfp_alloc(struct device *dev)
INIT_DELAYED_WORK(&sfp->poll, sfp_poll);
INIT_DELAYED_WORK(&sfp->timeout, sfp_timeout);
+ sfp_hwmon_init(sfp);
+
return sfp;
}
@@ -1853,6 +2058,8 @@ static void sfp_cleanup(void *data)
{
struct sfp *sfp = data;
+ sfp_hwmon_exit(sfp);
+
cancel_delayed_work_sync(&sfp->poll);
cancel_delayed_work_sync(&sfp->timeout);
if (sfp->i2c_mii) {
@@ -1964,6 +2171,11 @@ static int sfp_probe(struct platform_device *pdev)
sfp->state |= SFP_F_RATE_SELECT;
sfp_set_state(sfp, sfp->state);
sfp_module_tx_disable(sfp);
+ if (sfp->state & SFP_F_PRESENT) {
+ rtnl_lock();
+ sfp_sm_event(sfp, SFP_E_INSERT);
+ rtnl_unlock();
+ }
for (i = 0; i < GPIO_MAX; i++) {
if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
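The deferred hwmon registration above retries via delayed work rather than failing module detection outright. A stripped-down sketch of that retry skeleton, with hypothetical names and a stand-in for the sfp_read() call that may fail while the module EEPROM settles:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_hwmon {
	struct delayed_work probe;
	unsigned int tries;
};

static int example_read_diag(void)
{
	return -EAGAIN;	/* stand-in for the sfp_read() that may fail */
}

static void example_hwmon_probe(struct work_struct *work)
{
	struct example_hwmon *h = container_of(work, struct example_hwmon,
					       probe.work);

	if (example_read_diag() < 0) {
		/* Back off and retry until the budget is exhausted */
		if (h->tries--)
			mod_delayed_work(system_wq, &h->probe,
					 msecs_to_jiffies(5000));
		return;
	}

	/* success: register the hwmon device here */
}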
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index cac64b96d545..4d479e3c817d 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -855,6 +855,7 @@ err_free_chan:
sl->tty = NULL;
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
+ free_netdev(sl->dev);
err_exit:
rtnl_unlock();
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index dab6cccfeb52..683d371e6e82 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -136,10 +136,10 @@ struct tap_filter {
#define TUN_FLOW_EXPIRE (3 * HZ)
struct tun_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_dropped;
u32 tx_dropped;
@@ -313,8 +313,8 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
tfile->napi_enabled = napi_en;
tfile->napi_frags_enabled = napi_en && napi_frags;
if (napi_en) {
- netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
+ NAPI_POLL_WEIGHT);
napi_enable(&tfile->napi);
}
}
@@ -1167,10 +1167,10 @@ tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
p = per_cpu_ptr(tun->pcpu_stats, i);
do {
start = u64_stats_fetch_begin(&p->syncp);
- rxpackets = p->rx_packets;
- rxbytes = p->rx_bytes;
- txpackets = p->tx_packets;
- txbytes = p->tx_bytes;
+ rxpackets = u64_stats_read(&p->rx_packets);
+ rxbytes = u64_stats_read(&p->rx_bytes);
+ txpackets = u64_stats_read(&p->tx_packets);
+ txbytes = u64_stats_read(&p->tx_bytes);
} while (u64_stats_fetch_retry(&p->syncp, start));
stats->rx_packets += rxpackets;
@@ -1998,8 +1998,8 @@ drop:
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += len;
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, len);
u64_stats_update_end(&stats->syncp);
put_cpu_ptr(stats);
@@ -2052,8 +2052,8 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += ret;
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, ret);
u64_stats_update_end(&stats->syncp);
put_cpu_ptr(tun->pcpu_stats);
@@ -2147,8 +2147,8 @@ done:
/* caller is in process context, */
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += skb->len + vlan_hlen;
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen);
u64_stats_update_end(&stats->syncp);
put_cpu_ptr(tun->pcpu_stats);
@@ -2511,8 +2511,8 @@ build:
*/
stats = this_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += datasize;
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, datasize);
u64_stats_update_end(&stats->syncp);
if (rxhash)
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 011bd4cb546e..af3994e0853b 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -196,7 +196,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
/* Get the MAC address */
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
- if (ret < 0) {
+ if (ret < ETH_ALEN) {
netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
goto free;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index fe630438f67b..0cdb2ce47645 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -766,6 +766,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* ThinkPad Thunderbolt 3 Dock Gen 2 (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3082, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 00cab3f43a4c..c2c82e6391b4 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -578,8 +578,8 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
/* read current mtu value from device */
err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
- 0, iface_no, &max_datagram_size, 2);
- if (err < 0) {
+ 0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
+ if (err != sizeof(max_datagram_size)) {
dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
goto out;
}
@@ -590,7 +590,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
- 0, iface_no, &max_datagram_size, 2);
+ 0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
if (err < 0)
dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 596428ec71df..4196c0e32740 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1362,6 +1362,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
+ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
@@ -1370,6 +1371,8 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
+ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
+ {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index b9f526ed0d30..ac079395c8d4 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -670,6 +670,7 @@ enum rtl8152_flags {
SCHEDULE_TASKLET,
GREEN_ETHERNET,
DELL_TB_RX_AGG_BUG,
+ LENOVO_MACPASSTHRU,
};
/* Define these values to match your device */
@@ -1408,38 +1409,52 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
int ret = -EINVAL;
u32 ocp_data;
unsigned char buf[6];
-
- /* test for -AD variant of RTL8153 */
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
- if ((ocp_data & AD_MASK) == 0x1000) {
- /* test for MAC address pass-through bit */
- ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
- if ((ocp_data & PASS_THRU_MASK) != 1) {
- netif_dbg(tp, probe, tp->netdev,
- "No efuse for RTL8153-AD MAC pass through\n");
- return -ENODEV;
- }
+ char *mac_obj_name;
+ acpi_object_type mac_obj_type;
+ int mac_strlen;
+
+ if (test_bit(LENOVO_MACPASSTHRU, &tp->flags)) {
+ mac_obj_name = "\\MACA";
+ mac_obj_type = ACPI_TYPE_STRING;
+ mac_strlen = 0x16;
} else {
- /* test for RTL8153-BND and RTL8153-BD */
- ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
- if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
- netif_dbg(tp, probe, tp->netdev,
- "Invalid variant for MAC pass through\n");
- return -ENODEV;
+ /* test for -AD variant of RTL8153 */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ if ((ocp_data & AD_MASK) == 0x1000) {
+ /* test for MAC address pass-through bit */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+ if ((ocp_data & PASS_THRU_MASK) != 1) {
+ netif_dbg(tp, probe, tp->netdev,
+ "No efuse for RTL8153-AD MAC pass through\n");
+ return -ENODEV;
+ }
+ } else {
+ /* test for RTL8153-BND and RTL8153-BD */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
+ if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
+ netif_dbg(tp, probe, tp->netdev,
+ "Invalid variant for MAC pass through\n");
+ return -ENODEV;
+ }
}
+
+ mac_obj_name = "\\_SB.AMAC";
+ mac_obj_type = ACPI_TYPE_BUFFER;
+ mac_strlen = 0x17;
}
/* returns _AUXMAC_#AABBCCDDEEFF# */
- status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
+ status = acpi_evaluate_object(NULL, mac_obj_name, NULL, &buffer);
obj = (union acpi_object *)buffer.pointer;
if (!ACPI_SUCCESS(status))
return -ENODEV;
- if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
+ if (obj->type != mac_obj_type || obj->string.length != mac_strlen) {
netif_warn(tp, probe, tp->netdev,
"Invalid buffer for pass-thru MAC addr: (%d, %d)\n",
obj->type, obj->string.length);
goto amacout;
}
+
if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
netif_warn(tp, probe, tp->netdev,
@@ -6629,6 +6644,10 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
+ if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO &&
+ le16_to_cpu(udev->descriptor.idProduct) == 0x3082)
+ set_bit(LENOVO_MACPASSTHRU, &tp->flags);
+
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
(!strcmp(udev->serial, "000001000000") ||
!strcmp(udev->serial, "000002000000"))) {
@@ -6755,6 +6774,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 9f3c839f9e5f..a552df37a347 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -260,14 +260,8 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
- if (!rcv_xdp) {
- struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->bytes += length;
- stats->packets++;
- u64_stats_update_end(&stats->syncp);
- }
+ if (!rcv_xdp)
+ dev_lstats_add(dev, length);
} else {
drop:
atomic64_inc(&priv->dropped);
@@ -281,26 +275,11 @@ drop:
return NETDEV_TX_OK;
}
-static u64 veth_stats_tx(struct pcpu_lstats *result, struct net_device *dev)
+static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
struct veth_priv *priv = netdev_priv(dev);
- int cpu;
-
- result->packets = 0;
- result->bytes = 0;
- for_each_possible_cpu(cpu) {
- struct pcpu_lstats *stats = per_cpu_ptr(dev->lstats, cpu);
- u64 packets, bytes;
- unsigned int start;
- do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
- result->packets += packets;
- result->bytes += bytes;
- }
+ dev_lstats_read(dev, packets, bytes);
return atomic64_read(&priv->dropped);
}
@@ -335,11 +314,11 @@ static void veth_get_stats64(struct net_device *dev,
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer;
struct veth_rq_stats rx;
- struct pcpu_lstats tx;
+ u64 packets, bytes;
- tot->tx_dropped = veth_stats_tx(&tx, dev);
- tot->tx_bytes = tx.bytes;
- tot->tx_packets = tx.packets;
+ tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
+ tot->tx_bytes = bytes;
+ tot->tx_packets = packets;
veth_stats_rx(&rx, dev);
tot->rx_dropped = rx.xdp_drops;
@@ -349,9 +328,9 @@ static void veth_get_stats64(struct net_device *dev,
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (peer) {
- tot->rx_dropped += veth_stats_tx(&tx, peer);
- tot->rx_bytes += tx.bytes;
- tot->rx_packets += tx.packets;
+ tot->rx_dropped += veth_stats_tx(peer, &packets, &bytes);
+ tot->rx_bytes += bytes;
+ tot->rx_packets += packets;
veth_stats_rx(&rx, peer);
tot->tx_bytes += rx.xdp_bytes;
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index 14e324b84617..e8563acf98e8 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -47,13 +47,7 @@ static int vsockmon_close(struct net_device *dev)
static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int len = skb->len;
- struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->bytes += len;
- stats->packets++;
- u64_stats_update_end(&stats->syncp);
+ dev_lstats_add(dev, skb->len);
dev_kfree_skb(skb);
@@ -63,30 +57,9 @@ static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev)
static void
vsockmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- int i;
- u64 bytes = 0, packets = 0;
-
- for_each_possible_cpu(i) {
- const struct pcpu_lstats *vstats;
- u64 tbytes, tpackets;
- unsigned int start;
-
- vstats = per_cpu_ptr(dev->lstats, i);
+ dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
- do {
- start = u64_stats_fetch_begin_irq(&vstats->syncp);
- tbytes = vstats->bytes;
- tpackets = vstats->packets;
- } while (u64_stats_fetch_retry_irq(&vstats->syncp, start));
-
- packets += tpackets;
- bytes += tbytes;
- }
-
- stats->rx_packets = packets;
stats->tx_packets = 0;
-
- stats->rx_bytes = bytes;
stats->tx_bytes = 0;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 11f5776affb1..bf04bc2e68c2 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3175,9 +3175,29 @@ static void vxlan_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}
+static int vxlan_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
+ dst->remote_ifindex);
+
+ if (!lowerdev) {
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.port = PORT_OTHER;
+ cmd->base.speed = SPEED_UNKNOWN;
+
+ return 0;
+ }
+
+ return __ethtool_get_link_ksettings(lowerdev, cmd);
+}
+
static const struct ethtool_ops vxlan_ethtool_ops = {
- .get_drvinfo = vxlan_get_drvinfo,
- .get_link = ethtool_op_get_link,
+ .get_drvinfo = vxlan_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = vxlan_get_link_ksettings,
};
static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 46f1427e6e9e..ba326f6c1214 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1781,8 +1781,8 @@ static int adm8211_probe(struct pci_dev *pdev,
{
struct ieee80211_hw *dev;
struct adm8211_priv *priv;
- unsigned long mem_addr, mem_len;
- unsigned int io_addr, io_len;
+ unsigned long mem_len;
+ unsigned int io_len;
int err;
u32 reg;
u8 perm_addr[ETH_ALEN];
@@ -1794,9 +1794,7 @@ static int adm8211_probe(struct pci_dev *pdev,
return err;
}
- io_addr = pci_resource_start(pdev, 0);
io_len = pci_resource_len(pdev, 0);
- mem_addr = pci_resource_start(pdev, 1);
mem_len = pci_resource_len(pdev, 1);
if (io_len < 256 || mem_len < 1024) {
printk(KERN_ERR "%s (adm8211): Too short PCI resources\n",
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index b94759daeacc..da2d179430ca 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -255,7 +255,8 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
if (flags & AR5523_CMD_FLAG_MAGIC)
hdr->magic = cpu_to_be32(1 << 24);
- memcpy(hdr + 1, idata, ilen);
+ if (ilen)
+ memcpy(hdr + 1, idata, ilen);
cmd->odata = odata;
cmd->olen = olen;
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index eca87f7c5b6c..294fbc1e89ab 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1704,9 +1704,6 @@ ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
/* Correctly initialize memory to 0 to prevent garbage
* data crashing system when download firmware
*/
- memset(dest_ring->base_addr_owner_space_unaligned, 0,
- nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);
-
dest_ring->base_addr_owner_space =
PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
CE_DESC_RING_ALIGN);
@@ -2019,8 +2016,6 @@ void ath10k_ce_alloc_rri(struct ath10k *ar)
value |= ar->hw_ce_regs->upd->mask;
ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
}
-
- memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32));
}
EXPORT_SYMBOL(ath10k_ce_alloc_rri);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 383d4fa555a8..4f76ba5d78a9 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -11,6 +11,7 @@
#include <linux/property.h>
#include <linux/dmi.h>
#include <linux/ctype.h>
+#include <linux/pm_qos.h>
#include <asm/byteorder.h>
#include "core.h"
@@ -677,13 +678,22 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
complete(&ar->target_suspend);
}
-static void ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
+static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
{
+ int ret;
u32 param = 0;
- ath10k_bmi_write32(ar, hi_mbox_io_block_sz, 256);
- ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99);
- ath10k_bmi_read32(ar, hi_acs_flags, &param);
+ ret = ath10k_bmi_write32(ar, hi_mbox_io_block_sz, 256);
+ if (ret)
+ return ret;
+
+ ret = ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99);
+ if (ret)
+ return ret;
+
+ ret = ath10k_bmi_read32(ar, hi_acs_flags, &param);
+ if (ret)
+ return ret;
/* Data transfer is not initiated, when reduced Tx completion
* is used for SDIO. disable it until fixed
@@ -700,14 +710,23 @@ static void ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
else
param |= HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET;
- ath10k_bmi_write32(ar, hi_acs_flags, param);
+ ret = ath10k_bmi_write32(ar, hi_acs_flags, param);
+ if (ret)
+ return ret;
/* Explicitly set fwlog prints to zero as target may turn it on
* based on scratch registers.
*/
- ath10k_bmi_read32(ar, hi_option_flag, &param);
+ ret = ath10k_bmi_read32(ar, hi_option_flag, &param);
+ if (ret)
+ return ret;
+
param |= HI_OPTION_DISABLE_DBGLOG;
- ath10k_bmi_write32(ar, hi_option_flag, param);
+ ret = ath10k_bmi_write32(ar, hi_option_flag, param);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int ath10k_init_configure_target(struct ath10k *ar)
@@ -1009,6 +1028,7 @@ static int ath10k_download_fw(struct ath10k *ar)
u32 address, data_len;
const void *data;
int ret;
+ struct pm_qos_request latency_qos;
address = ar->hw_params.patch_load_addr;
@@ -1042,8 +1062,14 @@ static int ath10k_download_fw(struct ath10k *ar)
ret);
}
- return ath10k_bmi_fast_download(ar, address,
- data, data_len);
+ memset(&latency_qos, 0, sizeof(latency_qos));
+ pm_qos_add_request(&latency_qos, PM_QOS_CPU_DMA_LATENCY, 0);
+
+ ret = ath10k_bmi_fast_download(ar, address, data, data_len);
+
+ pm_qos_remove_request(&latency_qos);
+
+ return ret;
}
void ath10k_core_free_board_files(struct ath10k *ar)
@@ -2565,8 +2591,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
if (status)
goto err;
- if (ar->hif.bus == ATH10K_BUS_SDIO)
- ath10k_init_sdio(ar, mode);
+ if (ar->hif.bus == ATH10K_BUS_SDIO) {
+ status = ath10k_init_sdio(ar, mode);
+ if (status) {
+ ath10k_err(ar, "failed to init SDIO: %d\n", status);
+ goto err;
+ }
+ }
}
ar->htc.htc_ops.target_send_suspend_complete =
@@ -2787,7 +2818,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
status = ath10k_hif_set_target_log_mode(ar, fw_diag_log);
if (status && status != -EOPNOTSUPP) {
- ath10k_warn(ar, "set traget log mode faileds: %d\n", status);
+ ath10k_warn(ar, "set target log mode failed: %d\n", status);
goto err_hif_stop;
}
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 4d7db07db6ba..af68eb5d0776 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -169,6 +169,7 @@ struct ath10k_wmi {
struct wmi_cmd_map *cmd;
struct wmi_vdev_param_map *vdev_param;
struct wmi_pdev_param_map *pdev_param;
+ struct wmi_peer_param_map *peer_param;
const struct wmi_ops *ops;
const struct wmi_peer_flags_map *peer_flags;
@@ -963,12 +964,20 @@ struct ath10k {
u32 hw_eeprom_rd;
u32 ht_cap_info;
u32 vht_cap_info;
+ u32 vht_supp_mcs;
u32 num_rf_chains;
u32 max_spatial_stream;
/* protected by conf_mutex */
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
u32 low_5ghz_chan;
u32 high_5ghz_chan;
bool ani_enabled;
+ u32 sys_cap_info;
+
+ /* protected by data_lock */
+ bool hw_rfkill_on;
+
/* protected by conf_mutex */
u8 ps_state_enable;
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index b6d2932383cf..2a4498067024 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -703,7 +703,7 @@ static const struct ath10k_mem_region qca99x0_hw20_mem_regions[] = {
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
- .start = 0x98000,
+ .start = 0x980000,
.len = 0x50000,
.name = "IRAM",
.section_table = {
@@ -786,7 +786,7 @@ static const struct ath10k_mem_region qca9984_hw10_mem_regions[] = {
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
- .start = 0x98000,
+ .start = 0x980000,
.len = 0x50000,
.name = "IRAM",
.section_table = {
@@ -891,7 +891,7 @@ static const struct ath10k_mem_region qca4019_hw10_mem_regions[] = {
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
- .start = 0x98000,
+ .start = 0x980000,
.len = 0x50000,
.name = "IRAM",
.section_table = {
@@ -951,6 +951,19 @@ static const struct ath10k_mem_region qca4019_hw10_mem_regions[] = {
},
};
+static const struct ath10k_mem_region wcn399x_hw10_mem_regions[] = {
+ {
+ /* MSA region start is not fixed, hence it is assigned at runtime */
+ .type = ATH10K_MEM_REGION_TYPE_MSA,
+ .len = 0x100000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_1_0_VERSION,
@@ -1048,6 +1061,14 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
.size = ARRAY_SIZE(qca4019_hw10_mem_regions),
},
},
+ {
+ .hw_id = WCN3990_HW_1_0_DEV_VERSION,
+ .hw_rev = ATH10K_HW_WCN3990,
+ .region_table = {
+ .regions = wcn399x_hw10_mem_regions,
+ .size = ARRAY_SIZE(wcn399x_hw10_mem_regions),
+ },
+ },
};
static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
@@ -1208,9 +1229,11 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA);
dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len);
- memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
- crash_data->ramdump_buf_len);
- sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+ if (crash_data->ramdump_buf_len) {
+ memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
+ crash_data->ramdump_buf_len);
+ sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+ }
}
mutex_unlock(&ar->dump_mutex);
@@ -1257,6 +1280,9 @@ int ath10k_coredump_register(struct ath10k *ar)
if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar);
+ if (!crash_data->ramdump_buf_len)
+ return 0;
+
crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len);
if (!crash_data->ramdump_buf)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index 09de41922f97..8bf03e8c1d3a 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -115,6 +115,7 @@ enum ath10k_mem_region_type {
ATH10K_MEM_REGION_TYPE_IRAM2 = 5,
ATH10K_MEM_REGION_TYPE_IOSRAM = 6,
ATH10K_MEM_REGION_TYPE_IOREG = 7,
+ ATH10K_MEM_REGION_TYPE_MSA = 8,
};
/* Define a section of the region which should be copied. As not all parts
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index bd2b5628f850..04c50a26a4f4 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1516,7 +1516,7 @@ static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
*len += scnprintf(buf + *len, buf_len - *len,
"No. Preamble Rate_code ");
- for (i = 0; i < WMI_TPC_TX_N_CHAIN; i++)
+ for (i = 0; i < tpc_stats->num_tx_chain; i++)
*len += scnprintf(buf + *len, buf_len - *len,
"tpc_value%d ", i);
@@ -2532,6 +2532,7 @@ void ath10k_debug_destroy(struct ath10k *ar)
ath10k_debug_fw_stats_reset(ar);
kfree(ar->debug.tpc_stats);
+ kfree(ar->debug.tpc_stats_final);
}
int ath10k_debug_register(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 42931a669b02..367539f2c370 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -430,7 +430,7 @@ ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
}
ret = ath10k_wmi_peer_set_param(ar, arsta->arvif->vdev_id, sta->addr,
- WMI_PEER_DEBUG, peer_debug_trigger);
+ ar->wmi.peer_param->debug, peer_debug_trigger);
if (ret) {
ath10k_warn(ar, "failed to set param to trigger peer tid logs for station ret: %d\n",
ret);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 53f1095de8ff..d95b63f133ab 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2073,7 +2073,7 @@ static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
case 24:
pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
break;
- };
+ }
}
static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
@@ -2726,7 +2726,7 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
- if (!peer) {
+ if (!peer || !peer->sta) {
spin_unlock_bh(&ar->data_lock);
rcu_read_unlock();
continue;
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index c415e971735b..2451e0fb8ee5 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -155,6 +155,9 @@ const struct ath10k_hw_values qca6174_values = {
.num_target_ce_config_wlan = 7,
.ce_desc_meta_data_mask = 0xFFFC,
.ce_desc_meta_data_lsb = 2,
+ .rfkill_pin = 16,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 1,
};
const struct ath10k_hw_values qca99x0_values = {
@@ -1145,6 +1148,7 @@ static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
const struct ath10k_hw_ops qca99x0_ops = {
.rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
.rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
+ .is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
const struct ath10k_hw_ops qca6174_ops = {
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 2ae57c1de7b5..35a362329a4f 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -379,6 +379,9 @@ struct ath10k_hw_values {
u8 num_target_ce_config_wlan;
u16 ce_desc_meta_data_mask;
u8 ce_desc_meta_data_lsb;
+ u32 rfkill_pin;
+ u32 rfkill_cfg;
+ bool rfkill_on_level;
};
extern const struct ath10k_hw_values qca988x_values;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a6d21856b7e7..83cc8778ca1e 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -12,6 +12,7 @@
#include <linux/etherdevice.h>
#include <linux/acpi.h>
#include <linux/of.h>
+#include <linux/bitfield.h>
#include "hif.h"
#include "core.h"
@@ -2773,7 +2774,7 @@ static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
return -EINVAL;
return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
- WMI_PEER_SMPS_STATE,
+ ar->wmi.peer_param->smps_state,
ath10k_smps_map[smps]);
}
@@ -2930,7 +2931,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
* poked with peer param command.
*/
ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
- WMI_PEER_DUMMY_VAR, 1);
+ ar->wmi.peer_param->dummy_var, 1);
if (ret) {
ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
arvif->bssid, arvif->vdev_id, ret);
@@ -3708,7 +3709,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
struct ieee80211_vif *vif,
enum ath10k_hw_txrx_mode txmode,
enum ath10k_mac_tx_path txpath,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool noque_offchan)
{
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -3738,10 +3739,10 @@ static int ath10k_mac_tx(struct ath10k *ar,
}
}
- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
if (!ath10k_mac_tx_frm_has_freq(ar)) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
- skb);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n",
+ skb, skb->len);
skb_queue_tail(&ar->offchan_tx_queue, skb);
ieee80211_queue_work(hw, &ar->offchan_tx_work);
@@ -3803,8 +3804,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
- skb);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n",
+ skb, skb->len);
hdr = (struct ieee80211_hdr *)skb->data;
peer_addr = ieee80211_get_DA(hdr);
@@ -3850,7 +3851,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true);
if (ret) {
ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
ret);
@@ -3860,8 +3861,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
time_left =
wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
if (time_left == 0)
- ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
- skb);
+ ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n",
+ skb, skb->len);
if (!peer && tmp_peer_created) {
ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
@@ -3903,8 +3904,10 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
ar->running_fw->fw_file.fw_features)) {
paddr = dma_map_single(ar->dev, skb->data,
skb->len, DMA_TO_DEVICE);
- if (!paddr)
+ if (dma_mapping_error(ar->dev, paddr)) {
+ ieee80211_free_txskb(ar->hw, skb);
continue;
+ }
ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
if (ret) {
ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
@@ -4065,7 +4068,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
if (ret)
return ret;
- skb = ieee80211_tx_dequeue(hw, txq);
+ skb = ieee80211_tx_dequeue_ni(hw, txq);
if (!skb) {
spin_lock_bh(&ar->htt.tx_lock);
ath10k_htt_tx_dec_pending(htt);
@@ -4097,7 +4100,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->htt.tx_lock);
}
- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
if (unlikely(ret)) {
ath10k_warn(ar, "failed to push frame: %d\n", ret);
@@ -4378,7 +4381,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->htt.tx_lock);
}
- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
if (ret) {
ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
if (is_htt) {
@@ -4754,6 +4757,63 @@ static int __ath10k_fetch_bb_timing_dt(struct ath10k *ar,
return 0;
}
+static int ath10k_mac_rfkill_config(struct ath10k *ar)
+{
+ u32 param;
+ int ret;
+
+ if (ar->hw_values->rfkill_pin == 0) {
+ ath10k_warn(ar, "ath10k does not support hardware rfkill with this device\n");
+ return -EOPNOTSUPP;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d",
+ ar->hw_values->rfkill_pin, ar->hw_values->rfkill_cfg,
+ ar->hw_values->rfkill_on_level);
+
+ param = FIELD_PREP(WMI_TLV_RFKILL_CFG_RADIO_LEVEL,
+ ar->hw_values->rfkill_on_level) |
+ FIELD_PREP(WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM,
+ ar->hw_values->rfkill_pin) |
+ FIELD_PREP(WMI_TLV_RFKILL_CFG_PIN_AS_GPIO,
+ ar->hw_values->rfkill_cfg);
+
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->rfkill_config,
+ param);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set rfkill config 0x%x: %d\n",
+ param, ret);
+ return ret;
+ }
+ return 0;
+}
+
+int ath10k_mac_rfkill_enable_radio(struct ath10k *ar, bool enable)
+{
+ enum wmi_tlv_rfkill_enable_radio param;
+ int ret;
+
+ if (enable)
+ param = WMI_TLV_RFKILL_ENABLE_RADIO_ON;
+ else
+ param = WMI_TLV_RFKILL_ENABLE_RADIO_OFF;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac rfkill enable %d", param);
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rfkill_enable,
+ param);
+ if (ret) {
+ ath10k_warn(ar, "failed to set rfkill enable param %d: %d\n",
+ param, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int ath10k_start(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
@@ -4788,6 +4848,16 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err;
}
+ spin_lock_bh(&ar->data_lock);
+
+ if (ar->hw_rfkill_on) {
+ ar->hw_rfkill_on = false;
+ spin_unlock_bh(&ar->data_lock);
+ goto err;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
if (ret) {
ath10k_err(ar, "Could not init hif: %d\n", ret);
@@ -4801,6 +4871,14 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_power_down;
}
+ if (ar->sys_cap_info & WMI_TLV_SYS_CAP_INFO_RFKILL) {
+ ret = ath10k_mac_rfkill_config(ar);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to configure rfkill: %d", ret);
+ goto err_core_stop;
+ }
+ }
+
param = ar->wmi.pdev_param->pmf_qos;
ret = ath10k_wmi_pdev_set_param(ar, param, 1);
if (ret) {
@@ -4960,7 +5038,8 @@ static void ath10k_stop(struct ieee80211_hw *hw)
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_OFF) {
- ath10k_halt(ar);
+ if (!ar->hw_rfkill_on)
+ ath10k_halt(ar);
ar->state = ATH10K_STATE_OFF;
}
mutex_unlock(&ar->conf_mutex);
@@ -5635,6 +5714,37 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
+static void ath10k_recalculate_mgmt_rate(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ const struct ieee80211_supported_band *sband;
+ u8 basic_rate_idx;
+ int hw_rate_code;
+ u32 vdev_param;
+ u16 bitrate;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ sband = ar->hw->wiphy->bands[def->chan->band];
+ basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ bitrate = sband->bitrates[basic_rate_idx].bitrate;
+
+ hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
+ if (hw_rate_code < 0) {
+ ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
+ return;
+ }
+
+ vdev_param = ar->wmi.vdev_param->mgmt_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
+}
+
static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -5645,10 +5755,9 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct cfg80211_chan_def def;
u32 vdev_param, pdev_param, slottime, preamble;
u16 bitrate, hw_value;
- u8 rate, basic_rate_idx, rateidx;
- int ret = 0, hw_rate_code, mcast_rate;
+ u8 rate, rateidx;
+ int ret = 0, mcast_rate;
enum nl80211_band band;
- const struct ieee80211_supported_band *sband;
mutex_lock(&ar->conf_mutex);
@@ -5872,29 +5981,9 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
- if (changed & BSS_CHANGED_BASIC_RATES) {
- if (ath10k_mac_vif_chan(vif, &def)) {
- mutex_unlock(&ar->conf_mutex);
- return;
- }
-
- sband = ar->hw->wiphy->bands[def.chan->band];
- basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
- bitrate = sband->bitrates[basic_rate_idx].bitrate;
-
- hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
- if (hw_rate_code < 0) {
- ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
- mutex_unlock(&ar->conf_mutex);
- return;
- }
-
- vdev_param = ar->wmi.vdev_param->mgmt_rate;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
- hw_rate_code);
- if (ret)
- ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
- }
+ if (changed & BSS_CHANGED_BASIC_RATES &&
+ !ath10k_mac_vif_chan(arvif->vif, &def))
+ ath10k_recalculate_mgmt_rate(ar, vif, &def);
mutex_unlock(&ar->conf_mutex);
}
@@ -6239,7 +6328,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (sta && sta->tdls)
ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_AUTHORIZE, 1);
+ ar->wmi.peer_param->authorize, 1);
exit:
mutex_unlock(&ar->conf_mutex);
@@ -6330,7 +6419,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, bw, mode);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_PHYMODE, mode);
+ ar->wmi.peer_param->phymode, mode);
if (err) {
ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
sta->addr, mode, err);
@@ -6338,7 +6427,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
}
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_CHAN_WIDTH, bw);
+ ar->wmi.peer_param->chan_width, bw);
if (err)
ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
sta->addr, bw, err);
@@ -6349,7 +6438,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, nss);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_NSS, nss);
+ ar->wmi.peer_param->nss, nss);
if (err)
ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
sta->addr, nss, err);
@@ -6360,7 +6449,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, smps);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_SMPS_STATE, smps);
+ ar->wmi.peer_param->smps_state, smps);
if (err)
ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
sta->addr, smps, err);
@@ -6434,7 +6523,7 @@ static int ath10k_sta_set_txpwr(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_USE_FIXED_PWR, txpwr);
+ ar->wmi.peer_param->use_fixed_power, txpwr);
if (ret) {
ath10k_warn(ar, "failed to set tx power for station ret: %d\n",
ret);
@@ -6515,6 +6604,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
GFP_KERNEL);
if (!arsta->tx_stats) {
+ ath10k_mac_dec_num_stations(arvif, sta);
ret = -ENOMEM;
goto exit;
}
@@ -7419,7 +7509,7 @@ static bool ath10k_mac_set_vht_bitrate_mask_fixup(struct ath10k *ar,
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
WMI_PEER_PARAM_FIXED_RATE, rate);
if (err)
- ath10k_warn(ar, "failed to eanble STA %pM peer fixed rate: %d\n",
+ ath10k_warn(ar, "failed to enable STA %pM peer fixed rate: %d\n",
sta->addr, err);
return true;
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 1fe84948b868..98d83a26ea60 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -72,6 +72,7 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
u8 tid);
int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
void ath10k_mac_wait_tx_complete(struct ath10k *ar);
+int ath10k_mac_rfkill_enable_radio(struct ath10k *ar, bool enable);
static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
struct sk_buff *skb)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index a0b4d265c6eb..bb44f5a0941b 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2567,35 +2567,31 @@ static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
}
static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
{
u32 val;
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val | SOC_RESET_CONTROL_CE_RST_MASK);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CE_RST_MASK);
msleep(10);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val & ~SOC_RESET_CONTROL_CE_RST_MASK);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}
static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
u32 val;
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_LF_TIMER_CONTROL0_ADDRESS);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_LF_TIMER_CONTROL0_ADDRESS,
- val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS,
+ val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}
static int ath10k_pci_warm_reset(struct ath10k *ar)
@@ -3490,7 +3486,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
struct ath10k_pci *ar_pci;
enum ath10k_hw_rev hw_rev;
struct ath10k_bus_params bus_params = {};
- bool pci_ps;
+ bool pci_ps, is_qca988x = false;
int (*pci_soft_reset)(struct ath10k *ar);
int (*pci_hard_reset)(struct ath10k *ar);
u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
@@ -3500,6 +3496,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
case QCA988X_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA988X;
pci_ps = false;
+ is_qca988x = true;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
@@ -3619,25 +3616,34 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_deinit_irq;
}
+ bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+ bus_params.link_can_suspend = true;
+ /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
+ * fall off the bus during chip_reset. These chips have the same pci
+ * device id as the QCA9880 BR4A or 2R4E, hence this check.
+ */
+ if (is_qca988x) {
+ bus_params.chip_id =
+ ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (bus_params.chip_id != 0xffffffff) {
+ if (!ath10k_pci_chip_is_supported(pdev->device,
+ bus_params.chip_id))
+ goto err_unsupported;
+ }
+ }
+
ret = ath10k_pci_chip_reset(ar);
if (ret) {
ath10k_err(ar, "failed to reset chip: %d\n", ret);
goto err_free_irq;
}
- bus_params.dev_type = ATH10K_DEV_TYPE_LL;
- bus_params.link_can_suspend = true;
bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
- if (bus_params.chip_id == 0xffffffff) {
- ath10k_err(ar, "failed to get chip id\n");
- goto err_free_irq;
- }
+ if (bus_params.chip_id == 0xffffffff)
+ goto err_unsupported;
- if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
- ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
- pdev->device, bus_params.chip_id);
+ if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id))
goto err_free_irq;
- }
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
@@ -3647,6 +3653,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
return 0;
+err_unsupported:
+ ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
+ pdev->device, bus_params.chip_id);
+
err_free_irq:
ath10k_pci_free_irq(ar);
ath10k_pci_rx_retry_sync(ar);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 3b63b6257c43..a0ba07b85362 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -111,6 +111,7 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
struct wlfw_msa_info_resp_msg_v01 resp = {};
struct wlfw_msa_info_req_msg_v01 req = {};
struct ath10k *ar = qmi->ar;
+ phys_addr_t max_mapped_addr;
struct qmi_txn txn;
int ret;
int i;
@@ -150,8 +151,20 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
goto out;
}
+ max_mapped_addr = qmi->msa_pa + qmi->msa_mem_size;
qmi->nr_mem_region = resp.mem_region_info_len;
for (i = 0; i < resp.mem_region_info_len; i++) {
+ if (resp.mem_region_info[i].size > qmi->msa_mem_size ||
+ resp.mem_region_info[i].region_addr > max_mapped_addr ||
+ resp.mem_region_info[i].region_addr < qmi->msa_pa ||
+ resp.mem_region_info[i].size +
+ resp.mem_region_info[i].region_addr > max_mapped_addr) {
+ ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
+ resp.mem_region_info[i].region_addr,
+ resp.mem_region_info[i].size);
+ ret = -EINVAL;
+ goto fail_unwind;
+ }
qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr;
qmi->mem_region[i].size = resp.mem_region_info[i].size;
qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag;
@@ -165,6 +178,8 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n");
return 0;
+fail_unwind:
+ memset(&qmi->mem_region[0], 0, sizeof(qmi->mem_region[0]) * i);
out:
return ret;
}
@@ -291,10 +306,16 @@ static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi)
struct wlfw_cal_report_resp_msg_v01 resp = {};
struct wlfw_cal_report_req_msg_v01 req = {};
struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct qmi_txn txn;
int i, j = 0;
int ret;
+ if (ar_snoc->xo_cal_supported) {
+ req.xo_cal_data_valid = 1;
+ req.xo_cal_data = ar_snoc->xo_cal_data;
+ }
+
ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei,
&resp);
if (ret < 0)
@@ -581,22 +602,29 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
{
struct wlfw_host_cap_resp_msg_v01 resp = {};
struct wlfw_host_cap_req_msg_v01 req = {};
+ struct qmi_elem_info *req_ei;
struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct qmi_txn txn;
int ret;
req.daemon_support_valid = 1;
req.daemon_support = 0;
- ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
- wlfw_host_cap_resp_msg_v01_ei, &resp);
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei,
+ &resp);
if (ret < 0)
goto out;
+ if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags))
+ req_ei = wlfw_host_cap_8bit_req_msg_v01_ei;
+ else
+ req_ei = wlfw_host_cap_req_msg_v01_ei;
+
ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
QMI_WLFW_HOST_CAP_REQ_V01,
WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
- wlfw_host_cap_req_msg_v01_ei, &req);
+ req_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath10k_err(ar, "failed to send host capability request: %d\n", ret);
@@ -643,7 +671,7 @@ int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode)
wlfw_ini_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
- ath10k_err(ar, "fail to send fw log reqest: %d\n", ret);
+ ath10k_err(ar, "failed to send fw log request: %d\n", ret);
goto out;
}
@@ -652,7 +680,7 @@ int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode)
goto out;
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- ath10k_err(ar, "fw log request rejectedr: %d\n",
+ ath10k_err(ar, "fw log request rejected: %d\n",
resp.resp.error);
ret = -EINVAL;
goto out;
@@ -671,6 +699,7 @@ ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
struct wlfw_ind_register_resp_msg_v01 resp = {};
struct wlfw_ind_register_req_msg_v01 req = {};
struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct qmi_txn txn;
int ret;
@@ -681,6 +710,11 @@ ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
req.msa_ready_enable_valid = 1;
req.msa_ready_enable = 1;
+ if (ar_snoc->xo_cal_supported) {
+ req.xo_cal_enable_valid = 1;
+ req.xo_cal_enable = 1;
+ }
+
ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
wlfw_ind_register_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -739,6 +773,13 @@ static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
if (ret)
return;
+ /*
+ * HACK: sleep for a while inbetween receiving the msa info response
+ * and the XPU update to prevent SDM845 from crashing due to a security
+ * violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
+ */
+ msleep(20);
+
ret = ath10k_qmi_setup_msa_permissions(qmi);
if (ret)
return;
@@ -795,9 +836,13 @@ ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi,
static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
{
struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
ath10k_qmi_remove_msa_permission(qmi);
ath10k_core_free_board_files(ar);
+ if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
+ ath10k_snoc_fw_crashed_dump(ar);
+
ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n");
}
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
index 1fe05c6218c3..86fcf4e1de5f 100644
--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
@@ -1988,6 +1988,28 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
{}
};
+struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support),
+ },
+ {}
+};
+
struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
index bca1186e1560..4d107e1364a8 100644
--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
@@ -575,6 +575,7 @@ struct wlfw_host_cap_req_msg_v01 {
#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189
extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
+extern struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[];
struct wlfw_host_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 9870d2d095c8..120200a93bcc 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -2086,9 +2086,6 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_free_wq;
}
- /* TODO: remove this once SDIO support is fully implemented */
- ath10k_warn(ar, "WARNING: ath10k SDIO support is work-in-progress, problems may arise!\n");
-
return 0;
err_free_wq:
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index b491361e6ed4..16177497bba7 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -9,9 +9,11 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include "ce.h"
+#include "coredump.h"
#include "debug.h"
#include "hif.h"
#include "htc.h"
@@ -36,15 +38,15 @@ static char *const ce_name[] = {
"WLAN_CE_11",
};
-static struct ath10k_vreg_info vreg_cfg[] = {
- {NULL, "vdd-0.8-cx-mx", 800000, 850000, 0, 0, false},
- {NULL, "vdd-1.8-xo", 1800000, 1850000, 0, 0, false},
- {NULL, "vdd-1.3-rfa", 1300000, 1350000, 0, 0, false},
- {NULL, "vdd-3.3-ch0", 3300000, 3350000, 0, 0, false},
+static const char * const ath10k_regulators[] = {
+ "vdd-0.8-cx-mx",
+ "vdd-1.8-xo",
+ "vdd-1.3-rfa",
+ "vdd-3.3-ch0",
};
-static struct ath10k_clk_info clk_cfg[] = {
- {NULL, "cxo_ref_clk_pin", 0, false},
+static const char * const ath10k_clocks[] = {
+ "cxo_ref_clk_pin",
};
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
@@ -976,8 +978,7 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar,
sizeof(struct ath10k_svc_pipe_cfg);
cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
&target_service_to_ce_map_wlan;
- cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
- sizeof(struct ath10k_shadow_reg_cfg);
+ cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map);
cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
&target_shadow_reg_cfg_map;
@@ -1257,10 +1258,29 @@ static int ath10k_snoc_resource_init(struct ath10k *ar)
ar_snoc->ce_irqs[i].irq_line = res->start;
}
+ ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
+ &ar_snoc->xo_cal_data);
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc xo-cal-data return %d\n", ret);
+ if (ret == 0) {
+ ar_snoc->xo_cal_supported = true;
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
+ ar_snoc->xo_cal_data);
+ }
+ ret = 0;
+
out:
return ret;
}
+static void ath10k_snoc_quirks_init(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct device *dev = &ar_snoc->dev->dev;
+
+ if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
+ set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
+}
+
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
@@ -1337,296 +1357,102 @@ static void ath10k_snoc_release_resource(struct ath10k *ar)
ath10k_ce_free_pipe(ar, i);
}
-static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
- struct ath10k_vreg_info *vreg_info)
-{
- struct regulator *reg;
- int ret = 0;
-
- reg = devm_regulator_get_optional(dev, vreg_info->name);
-
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
-
- if (ret == -EPROBE_DEFER) {
- ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
- vreg_info->name);
- return ret;
- }
- if (vreg_info->required) {
- ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
- vreg_info->name, ret);
- return ret;
- }
- ath10k_dbg(ar, ATH10K_DBG_SNOC,
- "Optional regulator %s doesn't exist: %d\n",
- vreg_info->name, ret);
- goto done;
- }
-
- vreg_info->reg = reg;
-
-done:
- ath10k_dbg(ar, ATH10K_DBG_SNOC,
- "snog vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
- vreg_info->name, vreg_info->min_v, vreg_info->max_v,
- vreg_info->load_ua, vreg_info->settle_delay);
-
- return 0;
-}
-
-static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
- struct ath10k_clk_info *clk_info)
-{
- struct clk *handle;
- int ret = 0;
-
- handle = devm_clk_get(dev, clk_info->name);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- if (clk_info->required) {
- ath10k_err(ar, "snoc clock %s isn't available: %d\n",
- clk_info->name, ret);
- return ret;
- }
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
- clk_info->name,
- ret);
- return 0;
- }
-
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
- clk_info->name, clk_info->freq);
-
- clk_info->handle = handle;
-
- return ret;
-}
-
-static int __ath10k_snoc_vreg_on(struct ath10k *ar,
- struct ath10k_vreg_info *vreg_info)
-{
- int ret;
-
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
- vreg_info->name);
-
- ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
- vreg_info->max_v);
- if (ret) {
- ath10k_err(ar,
- "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
- vreg_info->name, vreg_info->min_v, vreg_info->max_v);
- return ret;
- }
-
- if (vreg_info->load_ua) {
- ret = regulator_set_load(vreg_info->reg, vreg_info->load_ua);
- if (ret < 0) {
- ath10k_err(ar, "failed to set regulator %s load: %d\n",
- vreg_info->name, vreg_info->load_ua);
- goto err_set_load;
- }
- }
-
- ret = regulator_enable(vreg_info->reg);
- if (ret) {
- ath10k_err(ar, "failed to enable regulator %s\n",
- vreg_info->name);
- goto err_enable;
- }
-
- if (vreg_info->settle_delay)
- udelay(vreg_info->settle_delay);
-
- return 0;
-
-err_enable:
- regulator_set_load(vreg_info->reg, 0);
-err_set_load:
- regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
-
- return ret;
-}
-
-static int __ath10k_snoc_vreg_off(struct ath10k *ar,
- struct ath10k_vreg_info *vreg_info)
+static int ath10k_hw_power_on(struct ath10k *ar)
{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
int ret;
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
- vreg_info->name);
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
- ret = regulator_disable(vreg_info->reg);
+ ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
if (ret)
- ath10k_err(ar, "failed to disable regulator %s\n",
- vreg_info->name);
-
- ret = regulator_set_load(vreg_info->reg, 0);
- if (ret < 0)
- ath10k_err(ar, "failed to set load %s\n", vreg_info->name);
+ return ret;
- ret = regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+ ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks);
if (ret)
- ath10k_err(ar, "failed to set voltage %s\n", vreg_info->name);
+ goto vreg_off;
return ret;
-}
-
-static int ath10k_snoc_vreg_on(struct ath10k *ar)
-{
- struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_vreg_info *vreg_info;
- int ret = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
- vreg_info = &ar_snoc->vreg[i];
-
- if (!vreg_info->reg)
- continue;
-
- ret = __ath10k_snoc_vreg_on(ar, vreg_info);
- if (ret)
- goto err_reg_config;
- }
-
- return 0;
-
-err_reg_config:
- for (i = i - 1; i >= 0; i--) {
- vreg_info = &ar_snoc->vreg[i];
-
- if (!vreg_info->reg)
- continue;
-
- __ath10k_snoc_vreg_off(ar, vreg_info);
- }
+vreg_off:
+ regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
return ret;
}
-static int ath10k_snoc_vreg_off(struct ath10k *ar)
+static int ath10k_hw_power_off(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_vreg_info *vreg_info;
- int ret = 0;
- int i;
-
- for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
- vreg_info = &ar_snoc->vreg[i];
- if (!vreg_info->reg)
- continue;
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
- ret = __ath10k_snoc_vreg_off(ar, vreg_info);
- }
+ clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks);
- return ret;
+ return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
}
-static int ath10k_snoc_clk_init(struct ath10k *ar)
+static void ath10k_msa_dump_memory(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_clk_info *clk_info;
- int ret = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
- clk_info = &ar_snoc->clk[i];
-
- if (!clk_info->handle)
- continue;
-
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
- clk_info->name);
+ const struct ath10k_hw_mem_layout *mem_layout;
+ const struct ath10k_mem_region *current_region;
+ struct ath10k_dump_ram_data_hdr *hdr;
+ size_t buf_len;
+ u8 *buf;
- if (clk_info->freq) {
- ret = clk_set_rate(clk_info->handle, clk_info->freq);
-
- if (ret) {
- ath10k_err(ar, "failed to set clock %s freq %u\n",
- clk_info->name, clk_info->freq);
- goto err_clock_config;
- }
- }
-
- ret = clk_prepare_enable(clk_info->handle);
- if (ret) {
- ath10k_err(ar, "failed to enable clock %s\n",
- clk_info->name);
- goto err_clock_config;
- }
- }
-
- return 0;
-
-err_clock_config:
- for (i = i - 1; i >= 0; i--) {
- clk_info = &ar_snoc->clk[i];
-
- if (!clk_info->handle)
- continue;
-
- clk_disable_unprepare(clk_info->handle);
- }
+ if (!crash_data || !crash_data->ramdump_buf)
+ return;
- return ret;
-}
+ mem_layout = ath10k_coredump_get_mem_layout(ar);
+ if (!mem_layout)
+ return;
-static int ath10k_snoc_clk_deinit(struct ath10k *ar)
-{
- struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_clk_info *clk_info;
- int i;
+ current_region = &mem_layout->region_table.regions[0];
- for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
- clk_info = &ar_snoc->clk[i];
+ buf = crash_data->ramdump_buf;
+ buf_len = crash_data->ramdump_buf_len;
+ memset(buf, 0, buf_len);
- if (!clk_info->handle)
- continue;
+ /* Reserve space for the header. */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
- clk_info->name);
+ hdr->region_type = cpu_to_le32(current_region->type);
+ hdr->start = cpu_to_le32((unsigned long)ar_snoc->qmi->msa_va);
+ hdr->length = cpu_to_le32(ar_snoc->qmi->msa_mem_size);
- clk_disable_unprepare(clk_info->handle);
+ if (current_region->len < ar_snoc->qmi->msa_mem_size) {
+ memcpy(buf, ar_snoc->qmi->msa_va, current_region->len);
+ ath10k_warn(ar, "msa dump length %x is less than msa size %x\n",
+ current_region->len, ar_snoc->qmi->msa_mem_size);
+ } else {
+ memcpy(buf, ar_snoc->qmi->msa_va, ar_snoc->qmi->msa_mem_size);
}
-
- return 0;
}
-static int ath10k_hw_power_on(struct ath10k *ar)
+void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
{
- int ret;
+ struct ath10k_fw_crash_data *crash_data;
+ char guid[UUID_STRING_LEN + 1];
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
+ mutex_lock(&ar->dump_mutex);
- ret = ath10k_snoc_vreg_on(ar);
- if (ret)
- return ret;
+ spin_lock_bh(&ar->data_lock);
+ ar->stats.fw_crash_counter++;
+ spin_unlock_bh(&ar->data_lock);
- ret = ath10k_snoc_clk_init(ar);
- if (ret)
- goto vreg_off;
+ crash_data = ath10k_coredump_new(ar);
- return ret;
-
-vreg_off:
- ath10k_snoc_vreg_off(ar);
- return ret;
-}
-
-static int ath10k_hw_power_off(struct ath10k *ar)
-{
- int ret;
-
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
-
- ath10k_snoc_clk_deinit(ar);
-
- ret = ath10k_snoc_vreg_off(ar);
+ if (crash_data)
+ scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
+ else
+ scnprintf(guid, sizeof(guid), "n/a");
- return ret;
+ ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
+ ath10k_print_driver_info(ar);
+ ath10k_msa_dump_memory(ar, crash_data);
+ mutex_unlock(&ar->dump_mutex);
}
static const struct of_device_id ath10k_snoc_dt_match[] = {
@@ -1678,6 +1504,8 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ar->ce_priv = &ar_snoc->ce;
msa_size = drv_data->msa_size;
+ ath10k_snoc_quirks_init(ar);
+
ret = ath10k_snoc_resource_init(ar);
if (ret) {
ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
@@ -1695,20 +1523,37 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_release_resource;
}
- ar_snoc->vreg = vreg_cfg;
- for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
- ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
- if (ret)
- goto err_free_irq;
+ ar_snoc->num_vregs = ARRAY_SIZE(ath10k_regulators);
+ ar_snoc->vregs = devm_kcalloc(&pdev->dev, ar_snoc->num_vregs,
+ sizeof(*ar_snoc->vregs), GFP_KERNEL);
+ if (!ar_snoc->vregs) {
+ ret = -ENOMEM;
+ goto err_free_irq;
}
+ for (i = 0; i < ar_snoc->num_vregs; i++)
+ ar_snoc->vregs[i].supply = ath10k_regulators[i];
- ar_snoc->clk = clk_cfg;
- for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
- ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
- if (ret)
- goto err_free_irq;
+ ret = devm_regulator_bulk_get(&pdev->dev, ar_snoc->num_vregs,
+ ar_snoc->vregs);
+ if (ret < 0)
+ goto err_free_irq;
+
+ ar_snoc->num_clks = ARRAY_SIZE(ath10k_clocks);
+ ar_snoc->clks = devm_kcalloc(&pdev->dev, ar_snoc->num_clks,
+ sizeof(*ar_snoc->clks), GFP_KERNEL);
+ if (!ar_snoc->clks) {
+ ret = -ENOMEM;
+ goto err_free_irq;
}
+ for (i = 0; i < ar_snoc->num_clks; i++)
+ ar_snoc->clks[i].id = ath10k_clocks[i];
+
+ ret = devm_clk_bulk_get_optional(&pdev->dev, ar_snoc->num_clks,
+ ar_snoc->clks);
+ if (ret)
+ goto err_free_irq;
+
ret = ath10k_hw_power_on(ar);
if (ret) {
ath10k_err(ar, "failed to power on device: %d\n", ret);
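The probe and power-on paths above replace the hand-rolled regulator/clock tables with the kernel's bulk consumer APIs. A minimal self-contained sketch of that pattern, with illustrative device and supply names (not taken from this patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static const char * const demo_supplies[] = { "vdd-0.8-cx-mx", "vdd-1.8-xo" };

static int demo_power_on(struct device *dev)
{
	struct regulator_bulk_data vregs[ARRAY_SIZE(demo_supplies)];
	struct clk_bulk_data clks[] = { { .id = "cxo_ref_clk_pin" } };
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(vregs); i++)
		vregs[i].supply = demo_supplies[i];

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(vregs), vregs);
	if (ret < 0)
		return ret;

	/* clocks are optional here, mirroring the driver's usage */
	ret = devm_clk_bulk_get_optional(dev, ARRAY_SIZE(clks), clks);
	if (ret)
		return ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(vregs), vregs);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(clks), clks);
	if (ret)
		regulator_bulk_disable(ARRAY_SIZE(vregs), vregs);

	return ret;
}

The bulk helpers handle per-supply error unwinding internally, which is what lets the patch delete the manual enable/rollback loops.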
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index d62f53501fbb..c05df45a3945 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -42,29 +42,16 @@ struct ath10k_snoc_ce_irq {
u32 irq_line;
};
-struct ath10k_vreg_info {
- struct regulator *reg;
- const char *name;
- u32 min_v;
- u32 max_v;
- u32 load_ua;
- unsigned long settle_delay;
- bool required;
-};
-
-struct ath10k_clk_info {
- struct clk *handle;
- const char *name;
- u32 freq;
- bool required;
-};
-
enum ath10k_snoc_flags {
ATH10K_SNOC_FLAG_REGISTERED,
ATH10K_SNOC_FLAG_UNREGISTERING,
ATH10K_SNOC_FLAG_RECOVERY,
+ ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK,
};
+struct clk_bulk_data;
+struct regulator_bulk_data;
+
struct ath10k_snoc {
struct platform_device *dev;
struct ath10k *ar;
@@ -76,10 +63,14 @@ struct ath10k_snoc {
struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX];
struct ath10k_ce ce;
struct timer_list rx_post_retry;
- struct ath10k_vreg_info *vreg;
- struct ath10k_clk_info *clk;
+ struct regulator_bulk_data *vregs;
+ size_t num_vregs;
+ struct clk_bulk_data *clks;
+ size_t num_clks;
struct ath10k_qmi *qmi;
unsigned long flags;
+ bool xo_cal_supported;
+ u32 xo_cal_data;
};
static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
@@ -88,5 +79,6 @@ static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
}
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type);
+void ath10k_snoc_fw_crashed_dump(struct ath10k *ar);
#endif /* _SNOC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 4102df016931..39abf8b12903 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -95,6 +95,8 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
+ info->status.rates[0].idx = -1;
+
trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index e1420f67f776..1e0343081be9 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -38,6 +38,10 @@ ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe)
struct ath10k_urb_context *urb_context = NULL;
unsigned long flags;
+ /* bail if this pipe is not initialized */
+ if (!pipe->ar_usb)
+ return NULL;
+
spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
if (!list_empty(&pipe->urb_list_head)) {
urb_context = list_first_entry(&pipe->urb_list_head,
@@ -55,6 +59,10 @@ static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe,
{
unsigned long flags;
+ /* bail if this pipe is not initialized */
+ if (!pipe->ar_usb)
+ return;
+
spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
pipe->urb_cnt++;
@@ -435,6 +443,7 @@ static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
"usb bulk transmit failed: %d\n", ret);
usb_unanchor_urb(urb);
+ usb_free_urb(urb);
ret = -EINVAL;
goto err_free_urb_to_pipe;
}
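The one-line usb_free_urb() addition above completes the usual anchored-URB error path: a failed usb_submit_urb() must unanchor and release the allocation reference, just as the success path releases it after submission. A minimal sketch of the pattern (names illustrative):

#include <linux/usb.h>

static int demo_submit(struct urb *urb, struct usb_anchor *anchor)
{
	int ret;

	usb_anchor_urb(urb, anchor);
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		usb_unanchor_urb(urb);
		usb_free_urb(urb); /* drop the reference taken at alloc time */
	}

	return ret;
}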
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 4d5d10c01064..69a1ec53df29 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -409,6 +409,49 @@ static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
return 0;
}
+static void ath10k_wmi_tlv_event_rfkill_state_change(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct wmi_tlv_rfkill_state_change_ev *ev;
+ const void **tb;
+ bool radio;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar,
+ "failed to parse rfkill state change event: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_RFKILL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
+ __le32_to_cpu(ev->gpio_pin_num),
+ __le32_to_cpu(ev->int_type),
+ __le32_to_cpu(ev->radio_state));
+
+ radio = (__le32_to_cpu(ev->radio_state) == WMI_TLV_RFKILL_RADIO_STATE_ON);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!radio)
+ ar->hw_rfkill_on = true;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ /* notify cfg80211 radio state change */
+ ath10k_mac_rfkill_enable_radio(ar, radio);
+ wiphy_rfkill_set_hw_state(ar->hw->wiphy, !radio);
+}
+
static int ath10k_wmi_tlv_event_temperature(struct ath10k *ar,
struct sk_buff *skb)
{
@@ -629,6 +672,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_TX_PAUSE_EVENTID:
ath10k_wmi_tlv_event_tx_pause(ar, skb);
break;
+ case WMI_TLV_RFKILL_STATE_CHANGE_EVENTID:
+ ath10k_wmi_tlv_event_rfkill_state_change(ar, skb);
+ break;
case WMI_TLV_PDEV_TEMPERATURE_EVENTID:
ath10k_wmi_tlv_event_temperature(ar, skb);
break;
@@ -1201,17 +1247,21 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
arg->max_tx_power = ev->hw_max_tx_power;
arg->ht_cap = ev->ht_cap_info;
arg->vht_cap = ev->vht_cap_info;
+ arg->vht_supp_mcs = ev->vht_supp_mcs;
arg->sw_ver0 = ev->abi.abi_ver0;
arg->sw_ver1 = ev->abi.abi_ver1;
arg->fw_build = ev->fw_build_vers;
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = reg->eeprom_rd;
+ arg->low_2ghz_chan = reg->low_2ghz_chan;
+ arg->high_2ghz_chan = reg->high_2ghz_chan;
arg->low_5ghz_chan = reg->low_5ghz_chan;
arg->high_5ghz_chan = reg->high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = svc_bmap;
arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
+ arg->sys_cap_info = ev->sys_cap_info;
ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
ath10k_wmi_tlv_parse_mem_reqs, arg);
@@ -1649,8 +1699,9 @@ ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
static void
ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
{
- struct host_memory_chunk *chunk;
+ struct host_memory_chunk_tlv *chunk;
struct wmi_tlv *tlv;
+ dma_addr_t paddr;
int i;
__le16 tlv_len, tlv_tag;
@@ -1666,6 +1717,12 @@ ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+ if (test_bit(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ ar->wmi.svc_map)) {
+ paddr = ar->wmi.mem_chunks[i].paddr;
+ chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
+ }
+
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n",
i,
@@ -1689,7 +1746,7 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
void *ptr;
chunks_len = ar->wmi.num_mem_chunks *
- (sizeof(struct host_memory_chunk) + sizeof(*tlv));
+ (sizeof(struct host_memory_chunk_tlv) + sizeof(*tlv));
len = (sizeof(*tlv) + sizeof(*cmd)) +
(sizeof(*tlv) + sizeof(*cfg)) +
(sizeof(*tlv) + chunks_len);
@@ -4204,6 +4261,26 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
+ .rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
+};
+
+static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
+ .smps_state = WMI_TLV_PEER_SMPS_STATE,
+ .ampdu = WMI_TLV_PEER_AMPDU,
+ .authorize = WMI_TLV_PEER_AUTHORIZE,
+ .chan_width = WMI_TLV_PEER_CHAN_WIDTH,
+ .nss = WMI_TLV_PEER_NSS,
+ .use_4addr = WMI_TLV_PEER_USE_4ADDR,
+ .membership = WMI_TLV_PEER_MEMBERSHIP,
+ .user_pos = WMI_TLV_PEER_USERPOS,
+ .crit_proto_hint_enabled = WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED,
+ .tx_fail_cnt_thr = WMI_TLV_PEER_TX_FAIL_CNT_THR,
+ .set_hw_retry_cts2s = WMI_TLV_PEER_SET_HW_RETRY_CTS2S,
+ .ibss_atim_win_len = WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH,
+ .phymode = WMI_TLV_PEER_PHYMODE,
+ .use_fixed_power = WMI_TLV_PEER_USE_FIXED_PWR,
+ .dummy_var = WMI_TLV_PEER_DUMMY_VAR,
};
static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
@@ -4394,6 +4471,7 @@ void ath10k_wmi_tlv_attach(struct ath10k *ar)
ar->wmi.cmd = &wmi_tlv_cmd_map;
ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
+ ar->wmi.peer_param = &wmi_tlv_peer_param_map;
ar->wmi.ops = &wmi_tlv_ops;
ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 649b229a41e9..4972dc12991c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -7,6 +7,8 @@
#ifndef _WMI_TLV_H
#define _WMI_TLV_H
+#include <linux/bitops.h>
+
#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
#define WMI_TLV_CMD_UNSUPPORTED 0
@@ -528,6 +530,24 @@ enum wmi_tlv_vdev_param {
WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
};
+enum wmi_tlv_peer_param {
+ WMI_TLV_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
+ WMI_TLV_PEER_AMPDU = 0x2,
+ WMI_TLV_PEER_AUTHORIZE = 0x3,
+ WMI_TLV_PEER_CHAN_WIDTH = 0x4,
+ WMI_TLV_PEER_NSS = 0x5,
+ WMI_TLV_PEER_USE_4ADDR = 0x6,
+ WMI_TLV_PEER_MEMBERSHIP = 0x7,
+ WMI_TLV_PEER_USERPOS = 0x8,
+ WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED = 0x9,
+ WMI_TLV_PEER_TX_FAIL_CNT_THR = 0xa,
+ WMI_TLV_PEER_SET_HW_RETRY_CTS2S = 0xb,
+ WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH = 0xc,
+ WMI_TLV_PEER_PHYMODE = 0xd,
+ WMI_TLV_PEER_USE_FIXED_PWR = 0xe,
+ WMI_TLV_PEER_DUMMY_VAR = 0xff,
+};
+
enum wmi_tlv_peer_flags {
WMI_TLV_PEER_AUTH = 0x00000001,
WMI_TLV_PEER_QOS = 0x00000002,
@@ -1409,6 +1429,11 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
+ WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
+ WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+ WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177,
+ WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178,
+ WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179,
WMI_TLV_MAX_EXT_SERVICE = 256,
};
@@ -1588,6 +1613,9 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
WMI_TLV_MAX_SERVICE);
SVCMAP(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI,
WMI_SERVICE_TX_DATA_ACK_RSSI, WMI_TLV_MAX_SERVICE);
+ SVCMAP(WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ WMI_TLV_MAX_SERVICE);
}
#undef SVCMAP
@@ -1743,6 +1771,21 @@ struct wmi_tlv_resource_config {
__le32 host_capab;
} __packed;
+/* structure describing host memory chunk. */
+struct host_memory_chunk_tlv {
+ /* id of the request that is passed up in service ready */
+ __le32 req_id;
+
+ /* the physical address of the memory chunk */
+ __le32 ptr;
+
+ /* size of the chunk */
+ __le32 size;
+
+ /* the upper 32 bits of the address; valid only for targets with more than 32-bit addressing */
+ __le32 ptr_high;
+} __packed;
+
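A minimal sketch of filling the new chunk layout from a dma_addr_t, using the standard lower_32_bits()/upper_32_bits() helpers; the function name is illustrative:

#include <linux/kernel.h>
#include <linux/types.h>

static void demo_fill_chunk(struct host_memory_chunk_tlv *chunk,
			    dma_addr_t paddr, u32 len, u32 req_id)
{
	chunk->req_id = __cpu_to_le32(req_id);
	chunk->ptr = __cpu_to_le32(lower_32_bits(paddr));
	chunk->size = __cpu_to_le32(len);
	/* meaningful only when the firmware advertises
	 * WMI_SERVICE_SUPPORT_EXTEND_ADDRESS
	 */
	chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
}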
struct wmi_tlv_init_cmd {
struct wmi_tlv_abi_version abi;
__le32 num_host_mem_chunks;
@@ -2235,6 +2278,31 @@ struct wmi_tlv_tdls_peer_event {
__le32 vdev_id;
} __packed;
+enum wmi_tlv_sys_cap_info_flags {
+ WMI_TLV_SYS_CAP_INFO_RXTX_LED = BIT(0),
+ WMI_TLV_SYS_CAP_INFO_RFKILL = BIT(1),
+};
+
+#define WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM GENMASK(5, 0)
+#define WMI_TLV_RFKILL_CFG_RADIO_LEVEL BIT(6)
+#define WMI_TLV_RFKILL_CFG_PIN_AS_GPIO GENMASK(10, 7)
+
+enum wmi_tlv_rfkill_enable_radio {
+ WMI_TLV_RFKILL_ENABLE_RADIO_ON = 0,
+ WMI_TLV_RFKILL_ENABLE_RADIO_OFF = 1,
+};
+
+enum wmi_tlv_rfkill_radio_state {
+ WMI_TLV_RFKILL_RADIO_STATE_OFF = 1,
+ WMI_TLV_RFKILL_RADIO_STATE_ON = 2,
+};
+
+struct wmi_tlv_rfkill_state_change_ev {
+ __le32 gpio_pin_num;
+ __le32 int_type;
+ __le32 radio_state;
+};
+
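How the WMI_TLV_RFKILL_CFG_* masks above would be composed is not shown in this patch; a hedged sketch using FIELD_PREP() from <linux/bitfield.h>, with illustrative pin/level values:

#include <linux/bitfield.h>

static u32 demo_rfkill_cfg(u32 gpio_pin, bool radio_on_level, u32 gpio_func)
{
	return FIELD_PREP(WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM, gpio_pin) |
	       FIELD_PREP(WMI_TLV_RFKILL_CFG_RADIO_LEVEL, radio_on_level) |
	       FIELD_PREP(WMI_TLV_RFKILL_CFG_PIN_AS_GPIO, gpio_func);
}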
void ath10k_wmi_tlv_attach(struct ath10k *ar);
enum wmi_nlo_auth_algorithm {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 4f707c6394bb..9f564e2b7a14 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -742,6 +742,19 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
.radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
};
+static struct wmi_peer_param_map wmi_peer_param_map = {
+ .smps_state = WMI_PEER_SMPS_STATE,
+ .ampdu = WMI_PEER_AMPDU,
+ .authorize = WMI_PEER_AUTHORIZE,
+ .chan_width = WMI_PEER_CHAN_WIDTH,
+ .nss = WMI_PEER_NSS,
+ .use_4addr = WMI_PEER_USE_4ADDR,
+ .use_fixed_power = WMI_PEER_USE_FIXED_PWR,
+ .debug = WMI_PEER_DEBUG,
+ .phymode = WMI_PEER_PHYMODE,
+ .dummy_var = WMI_PEER_DUMMY_VAR,
+};
+
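The point of the per-version map is that callers resolve parameter IDs through ar->wmi.peer_param instead of hardcoding enum values. A minimal sketch of a consumer, assuming the ath10k wmi-ops accessor keeps its usual shape (treat the exact signature as an assumption):

static int demo_peer_authorize(struct ath10k *ar, u32 vdev_id,
			       const u8 *peer_addr)
{
	/* resolves to WMI_PEER_AUTHORIZE or WMI_TLV_PEER_AUTHORIZE,
	 * depending on which map ath10k_wmi_attach() installed
	 */
	return ath10k_wmi_peer_set_param(ar, vdev_id, peer_addr,
					 ar->wmi.peer_param->authorize, 1);
}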
/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
@@ -4668,16 +4681,13 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
}
pream_idx = 0;
- for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
+ for (i = 0; i < tpc_stats->rate_max; i++) {
memset(tpc_value, 0, sizeof(tpc_value));
memset(buff, 0, sizeof(buff));
if (i == pream_table[pream_idx])
pream_idx++;
- for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
- if (j >= __le32_to_cpu(ev->num_tx_chain))
- break;
-
+ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
rate_code[i],
type);
@@ -4790,7 +4800,7 @@ void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
{
- u32 num_tx_chain;
+ u32 num_tx_chain, rate_max;
u8 rate_code[WMI_TPC_RATE_MAX];
u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
struct wmi_pdev_tpc_config_event *ev;
@@ -4806,6 +4816,13 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
return;
}
+ rate_max = __le32_to_cpu(ev->rate_max);
+ if (rate_max > WMI_TPC_RATE_MAX) {
+ ath10k_warn(ar, "number of rates %d exceeds TPC configured rate count %d\n",
+ rate_max, WMI_TPC_RATE_MAX);
+ rate_max = WMI_TPC_RATE_MAX;
+ }
+
tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
if (!tpc_stats)
return;
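The same validate-then-clamp idiom recurs for rate_max and num_tx_chain in both TPC event handlers below; a condensed sketch of the pattern (helper name illustrative):

static u32 demo_clamp_fw_count(struct ath10k *ar, u32 fw_count,
			       u32 table_max, const char *what)
{
	/* firmware-supplied counts index fixed-size driver tables, so
	 * clamp before use and warn so truncation is visible in logs
	 */
	if (fw_count > table_max) {
		ath10k_warn(ar, "%s %u exceeds table size %u, clamping\n",
			    what, fw_count, table_max);
		fw_count = table_max;
	}

	return fw_count;
}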
@@ -4822,8 +4839,8 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(ev->twice_antenna_reduction);
tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
- tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
- tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
+ tpc_stats->num_tx_chain = num_tx_chain;
+ tpc_stats->rate_max = rate_max;
ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
rate_code, pream_table,
@@ -5018,16 +5035,13 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
}
pream_idx = 0;
- for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
+ for (i = 0; i < tpc_stats->rate_max; i++) {
memset(tpc_value, 0, sizeof(tpc_value));
memset(buff, 0, sizeof(buff));
if (i == pream_table[pream_idx])
pream_idx++;
- for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
- if (j >= __le32_to_cpu(ev->num_tx_chain))
- break;
-
+ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
rate_code[i],
type, pream_idx);
@@ -5043,7 +5057,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
{
- u32 num_tx_chain;
+ u32 num_tx_chain, rate_max;
u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
struct wmi_pdev_tpc_final_table_event *ev;
@@ -5051,12 +5065,24 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
+ num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+ if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
+ ath10k_warn(ar, "number of tx chains %d exceeds TPC final configured tx chain count %d\n",
+ num_tx_chain, WMI_TPC_TX_N_CHAIN);
+ return;
+ }
+
+ rate_max = __le32_to_cpu(ev->rate_max);
+ if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
+ ath10k_warn(ar, "number of rates %d exceeds TPC final configured rate count %d\n",
+ rate_max, WMI_TPC_FINAL_RATE_MAX);
+ rate_max = WMI_TPC_FINAL_RATE_MAX;
+ }
+
tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
if (!tpc_stats)
return;
- num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
-
ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
num_tx_chain);
@@ -5069,8 +5095,8 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(ev->twice_antenna_reduction);
tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
- tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
- tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
+ tpc_stats->num_tx_chain = num_tx_chain;
+ tpc_stats->rate_max = rate_max;
ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
rate_code, pream_table,
@@ -5344,11 +5370,14 @@ ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
arg->max_tx_power = ev->hw_max_tx_power;
arg->ht_cap = ev->ht_cap_info;
arg->vht_cap = ev->vht_cap_info;
+ arg->vht_supp_mcs = ev->vht_supp_mcs;
arg->sw_ver0 = ev->sw_version;
arg->sw_ver1 = ev->sw_version_1;
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
+ arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
@@ -5383,16 +5412,25 @@ ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
arg->max_tx_power = ev->hw_max_tx_power;
arg->ht_cap = ev->ht_cap_info;
arg->vht_cap = ev->vht_cap_info;
+ arg->vht_supp_mcs = ev->vht_supp_mcs;
arg->sw_ver0 = ev->sw_version;
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
+ arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = ev->wmi_service_bitmap;
arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+ /* Deliberately skip ev->sys_cap_info, as WMI and WMI-TLV use
+ * different values. We would need a translation layer to handle
+ * that, but as we don't currently need anything from sys_cap_info
+ * on the WMI interface (only on WMI-TLV), it is safest to skip it.
+ */
+
n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
ARRAY_SIZE(arg->mem_reqs));
for (i = 0; i < n; i++)
@@ -5432,6 +5470,7 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
+ ar->vht_supp_mcs = __le32_to_cpu(arg.vht_supp_mcs);
ar->fw_version_major =
(__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
@@ -5441,11 +5480,16 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
ar->phy_capability = __le32_to_cpu(arg.phy_capab);
ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd);
+ ar->low_2ghz_chan = __le32_to_cpu(arg.low_2ghz_chan);
+ ar->high_2ghz_chan = __le32_to_cpu(arg.high_2ghz_chan);
ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan);
ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan);
+ ar->sys_cap_info = __le32_to_cpu(arg.sys_cap_info);
ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
arg.service_map, arg.service_map_len);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sys_cap_info 0x%x\n",
+ ar->sys_cap_info);
if (ar->num_rf_chains > ar->max_spatial_stream) {
ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
@@ -5544,17 +5588,22 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
skip_mem_alloc:
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
+ "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_mcs 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x low_2ghz_chan %d high_2ghz_chan %d low_5ghz_chan %d high_5ghz_chan %d num_mem_reqs 0x%08x\n",
__le32_to_cpu(arg.min_tx_power),
__le32_to_cpu(arg.max_tx_power),
__le32_to_cpu(arg.ht_cap),
__le32_to_cpu(arg.vht_cap),
+ __le32_to_cpu(arg.vht_supp_mcs),
__le32_to_cpu(arg.sw_ver0),
__le32_to_cpu(arg.sw_ver1),
__le32_to_cpu(arg.fw_build),
__le32_to_cpu(arg.phy_capab),
__le32_to_cpu(arg.num_rf_chains),
__le32_to_cpu(arg.eeprom_rd),
+ __le32_to_cpu(arg.low_2ghz_chan),
+ __le32_to_cpu(arg.high_2ghz_chan),
+ __le32_to_cpu(arg.low_5ghz_chan),
+ __le32_to_cpu(arg.high_5ghz_chan),
__le32_to_cpu(arg.num_mem_reqs));
dev_kfree_skb(skb);
@@ -5623,7 +5672,7 @@ int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
}
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
+ "wmi event ready sw_version 0x%08x abi_version %u mac_addr %pM status %d\n",
__le32_to_cpu(arg.sw_version),
__le32_to_cpu(arg.abi_version),
arg.mac_addr,
@@ -9332,6 +9381,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.cmd = &wmi_10_4_cmd_map;
ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9340,6 +9390,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.ops = &wmi_10_2_4_ops;
ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9348,6 +9399,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.ops = &wmi_10_2_ops;
ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9356,6 +9408,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.ops = &wmi_10_1_ops;
ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9364,6 +9417,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.ops = &wmi_ops;
ar->wmi.vdev_param = &wmi_vdev_param_map;
ar->wmi.pdev_param = &wmi_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9440,7 +9494,5 @@ void ath10k_wmi_detach(struct ath10k *ar)
}
cancel_work_sync(&ar->svc_rdy_work);
-
- if (ar->svc_rdy_skb)
- dev_kfree_skb(ar->svc_rdy_skb);
+ dev_kfree_skb(ar->svc_rdy_skb);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index e80dbe7e8f4c..74adce1dd3a9 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -202,6 +202,7 @@ enum wmi_service {
WMI_SERVICE_REPORT_AIRTIME,
WMI_SERVICE_SYNC_DELETE_CMDS,
WMI_SERVICE_TX_PWR_PER_PEER,
+ WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
/* Remember to add the new value to wmi_service_name()! */
@@ -496,6 +497,7 @@ static inline char *wmi_service_name(enum wmi_service service_id)
SVCSTR(WMI_SERVICE_REPORT_AIRTIME);
SVCSTR(WMI_SERVICE_SYNC_DELETE_CMDS);
SVCSTR(WMI_SERVICE_TX_PWR_PER_PEER);
+ SVCSTR(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS);
case WMI_SERVICE_MAX:
return NULL;
@@ -3786,6 +3788,8 @@ struct wmi_pdev_param_map {
u32 arp_srcaddr;
u32 arp_dstaddr;
u32 enable_btcoex;
+ u32 rfkill_config;
+ u32 rfkill_enable;
};
#define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -5071,6 +5075,25 @@ enum wmi_rate_preamble {
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
+struct wmi_peer_param_map {
+ u32 smps_state;
+ u32 ampdu;
+ u32 authorize;
+ u32 chan_width;
+ u32 nss;
+ u32 use_4addr;
+ u32 membership;
+ u32 use_fixed_power;
+ u32 user_pos;
+ u32 crit_proto_hint_enabled;
+ u32 tx_fail_cnt_thr;
+ u32 set_hw_retry_cts2s;
+ u32 ibss_atim_win_len;
+ u32 debug;
+ u32 phymode;
+ u32 dummy_var;
+};
+
struct wmi_vdev_param_map {
u32 rts_threshold;
u32 fragmentation_threshold;
@@ -6842,6 +6865,7 @@ struct wmi_svc_rdy_ev_arg {
__le32 max_tx_power;
__le32 ht_cap;
__le32 vht_cap;
+ __le32 vht_supp_mcs;
__le32 sw_ver0;
__le32 sw_ver1;
__le32 fw_build;
@@ -6849,8 +6873,11 @@ struct wmi_svc_rdy_ev_arg {
__le32 num_rf_chains;
__le32 eeprom_rd;
__le32 num_mem_reqs;
+ __le32 low_2ghz_chan;
+ __le32 high_2ghz_chan;
__le32 low_5ghz_chan;
__le32 high_5ghz_chan;
+ __le32 sys_cap_info;
const __le32 *service_map;
size_t service_map_len;
const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 94d34ee02265..307f1fea0a88 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -1707,7 +1707,7 @@ ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah)
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 offset;
u16 val;
- int ret = 0, i;
+ int i;
offset = AR5K_EEPROM_CTL(ee->ee_version) +
AR5K_EEPROM_N_CTLS(ee->ee_version);
@@ -1730,7 +1730,7 @@ ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah)
}
}
- return ret;
+ return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index d5ee32ce9eb3..43b4ae86e5fb 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -300,8 +300,7 @@ ath5k_pci_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int ath5k_pci_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct ath5k_hw *ah = hw->priv;
ath5k_led_off(ah);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 2382c6c46851..6885d2ded53a 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -3650,7 +3650,7 @@ static int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id,
if (wait)
return -EINVAL; /* Offload for wait not supported */
- buf = kmalloc(data_len, GFP_KERNEL);
+ buf = kmemdup(data, data_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -3661,7 +3661,6 @@ static int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id,
}
kfree(wmi->last_mgmt_tx_frame);
- memcpy(buf, data, data_len);
wmi->last_mgmt_tx_frame = buf;
wmi->last_mgmt_tx_frame_len = data_len;
@@ -3689,7 +3688,7 @@ static int __ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id,
if (wait)
return -EINVAL; /* Offload for wait not supported */
- buf = kmalloc(data_len, GFP_KERNEL);
+ buf = kmemdup(data, data_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -3700,7 +3699,6 @@ static int __ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id,
}
kfree(wmi->last_mgmt_tx_frame);
- memcpy(buf, data, data_len);
wmi->last_mgmt_tx_frame = buf;
wmi->last_mgmt_tx_frame_len = data_len;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 2b29bf4730f6..b4885a700296 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4183,7 +4183,7 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
{
- u32 data, ko, kg;
+ u32 data = 0, ko, kg;
if (!AR_SREV_9462_20_OR_LATER(ah))
return;
diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
index 159490f5a111..956fa7828d0c 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
+++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
@@ -12,7 +12,6 @@
* initialize the chip when the user-space is ready to extract the init code.
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/completion.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 4e8e80ac8341..9cec5c216e1f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -973,6 +973,8 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ath_htc_rx_status *rxstatus;
struct ath_rx_status rx_stats;
bool decrypt_error = false;
+ __be16 rs_datalen;
+ bool is_phyerr;
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
@@ -982,11 +984,24 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
rxstatus = (struct ath_htc_rx_status *)skb->data;
- if (be16_to_cpu(rxstatus->rs_datalen) -
- (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
+ rs_datalen = be16_to_cpu(rxstatus->rs_datalen);
+ if (unlikely(rs_datalen -
+ (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0)) {
ath_err(common,
"Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n",
- rxstatus->rs_datalen, skb->len);
+ rs_datalen, skb->len);
+ goto rx_next;
+ }
+
+ is_phyerr = rxstatus->rs_status & ATH9K_RXERR_PHY;
+ /*
+ * Discard zero-length packets, and packets smaller than an ACK
+ * that are not PHY errors (short radar pulses have a length of 3)
+ */
+ if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
+ ath_warn(common,
+ "Short RX data len, dropping (dlen: %d)\n",
+ rs_datalen);
goto rx_next;
}
@@ -1011,7 +1026,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
* Process PHY errors and return so that the packet
* can be dropped.
*/
- if (rx_stats.rs_status & ATH9K_RXERR_PHY) {
+ if (unlikely(is_phyerr)) {
/* TODO: Not using DFS processing now. */
if (ath_cmn_process_fft(&priv->spec_priv, hdr,
&rx_stats, rx_status->mactime)) {
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 92b2dd396436..f3461b193c7a 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -1021,13 +1021,12 @@ static void ath_pci_remove(struct pci_dev *pdev)
static int ath_pci_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = dev_get_drvdata(device);
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
if (test_bit(ATH_OP_WOW_ENABLED, &common->op_flags)) {
- dev_info(&pdev->dev, "WOW is enabled, bypassing PCI suspend\n");
+ dev_info(device, "WOW is enabled, bypassing PCI suspend\n");
return 0;
}
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index e25bfdf78c2e..20f4f8ea9f89 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -33,33 +33,33 @@ static int __ath_regd_init(struct ath_regulatory *reg);
*/
/* Only these channels all allow active scan on all world regulatory domains */
-#define ATH9K_2GHZ_CH01_11 REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
+#define ATH_2GHZ_CH01_11 REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
/* We enable active scan on these a case by case basis by regulatory domain */
-#define ATH9K_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20,\
+#define ATH_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20,\
NL80211_RRF_NO_IR)
-#define ATH9K_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20,\
+#define ATH_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20,\
NL80211_RRF_NO_IR | \
NL80211_RRF_NO_OFDM)
/* We allow IBSS on these on a case by case basis by regulatory domain */
-#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\
+#define ATH_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\
NL80211_RRF_NO_IR)
-#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\
+#define ATH_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\
NL80211_RRF_NO_IR)
-#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\
+#define ATH_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\
NL80211_RRF_NO_IR)
-#define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \
- ATH9K_2GHZ_CH12_13, \
- ATH9K_2GHZ_CH14
+#define ATH_2GHZ_ALL ATH_2GHZ_CH01_11, \
+ ATH_2GHZ_CH12_13, \
+ ATH_2GHZ_CH14
-#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \
- ATH9K_5GHZ_5470_5850
+#define ATH_5GHZ_ALL ATH_5GHZ_5150_5350, \
+ ATH_5GHZ_5470_5850
/* This one skips what we call "mid band" */
-#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \
- ATH9K_5GHZ_5725_5850
+#define ATH_5GHZ_NO_MIDBAND ATH_5GHZ_5150_5350, \
+ ATH_5GHZ_5725_5850
/* Can be used for:
* 0x60, 0x61, 0x62 */
@@ -67,8 +67,8 @@ static const struct ieee80211_regdomain ath_world_regdom_60_61_62 = {
.n_reg_rules = 5,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_ALL,
- ATH9K_5GHZ_ALL,
+ ATH_2GHZ_ALL,
+ ATH_5GHZ_ALL,
}
};
@@ -77,9 +77,9 @@ static const struct ieee80211_regdomain ath_world_regdom_63_65 = {
.n_reg_rules = 4,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_CH01_11,
- ATH9K_2GHZ_CH12_13,
- ATH9K_5GHZ_NO_MIDBAND,
+ ATH_2GHZ_CH01_11,
+ ATH_2GHZ_CH12_13,
+ ATH_5GHZ_NO_MIDBAND,
}
};
@@ -88,8 +88,8 @@ static const struct ieee80211_regdomain ath_world_regdom_64 = {
.n_reg_rules = 3,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_CH01_11,
- ATH9K_5GHZ_NO_MIDBAND,
+ ATH_2GHZ_CH01_11,
+ ATH_5GHZ_NO_MIDBAND,
}
};
@@ -98,8 +98,8 @@ static const struct ieee80211_regdomain ath_world_regdom_66_69 = {
.n_reg_rules = 3,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_CH01_11,
- ATH9K_5GHZ_ALL,
+ ATH_2GHZ_CH01_11,
+ ATH_5GHZ_ALL,
}
};
@@ -108,9 +108,9 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = {
.n_reg_rules = 4,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_CH01_11,
- ATH9K_2GHZ_CH12_13,
- ATH9K_5GHZ_ALL,
+ ATH_2GHZ_CH01_11,
+ ATH_2GHZ_CH12_13,
+ ATH_5GHZ_ALL,
}
};
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 8abda2760e04..6ba0fd57c951 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -2091,7 +2091,7 @@ struct wcn36xx_hal_set_bss_key_rsp_msg {
/*
* This is used configure the key information on a given station.
* When the sec_type is WEP40 or WEP104, the def_wep_idx is used to locate
- * a preconfigured key from a BSS the station assoicated with; otherwise
+ * a preconfigured key from a BSS the station associated with; otherwise
* a new key descriptor is created based on the key field.
*/
struct wcn36xx_hal_set_sta_key_req_msg {
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index a276dae30887..c30fdd0cbf1e 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -935,8 +935,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
out:
mutex_unlock(&wcn->conf_mutex);
-
- return;
}
/* this is required when using IEEE80211_HW_HAS_RATE_CONTROL */
diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h
index d32c1f4e533a..a8a43c25f843 100644
--- a/drivers/net/wireless/ath/wil6210/boot_loader.h
+++ b/drivers/net/wireless/ath/wil6210/boot_loader.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/* Copyright (c) 2015 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This file contains the definitions for the boot loader
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index c70854ea5634..7d6f14420855 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
index a9befb971cc4..396c94c53702 100644
--- a/drivers/net/wireless/ath/wil6210/debug.c
+++ b/drivers/net/wireless/ath/wil6210/debug.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013,2016 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 304b4d4e506a..11d0c79e9056 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index a04c87ffd37b..912c4eaf017b 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
index 3e2bbbceca06..6d3413a44b05 100644
--- a/drivers/net/wireless/ath/wil6210/fw.c
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/firmware.h>
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
index fa3164765b20..540fa1607794 100644
--- a/drivers/net/wireless/ath/wil6210/fw.h
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __WIL_FW_H__
#define __WIL_FW_H__
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 94ebfa338e3f..fbc84c03406b 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Algorithmic part of the firmware download.
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index b00a13d6d530..b1480b41cd3a 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/interrupt.h>
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 9b72202eeadc..06091d8a9e23 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/moduleparam.h>
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index a87bb84a8286..07b4a252a23c 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index db087ea58ddf..f26bf046d889 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 18dd8b246022..c174323c5c0b 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
@@ -629,8 +618,7 @@ static int __maybe_unused wil6210_pm_resume(struct device *dev)
static int __maybe_unused wil6210_pm_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
wil_dbg_pm(wil, "Runtime idle\n");
@@ -644,8 +632,7 @@ static int __maybe_unused wil6210_pm_runtime_resume(struct device *dev)
static int __maybe_unused wil6210_pm_runtime_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
if (test_bit(wil_status_suspended, wil->status)) {
wil_dbg_pm(wil, "trying to suspend while suspended\n");
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 56143e7670ed..ed4df561e5c5 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 4b7ac14fc2a7..9b4ca6b256d2 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/types.h>
diff --git a/drivers/net/wireless/ath/wil6210/pmc.h b/drivers/net/wireless/ath/wil6210/pmc.h
index 92b8c4d84a6a..b3d79eb50a43 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.h
+++ b/drivers/net/wireless/ath/wil6210/pmc.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
+/* SPDX-License-Identifier: ISC */
+/* Copyright (c) 2012-2015 Qualcomm Atheros, Inc. */
#include <linux/types.h>
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 13246d216803..d385bc03033a 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/trace.c b/drivers/net/wireless/ath/wil6210/trace.c
index cd2534b9c5aa..6909e989baec 100644
--- a/drivers/net/wireless/ath/wil6210/trace.c
+++ b/drivers/net/wireless/ath/wil6210/trace.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index 36ebfcf9ef30..11c989e95880 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2013-2016 Qualcomm Atheros, Inc.
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#undef TRACE_SYSTEM
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 598c1fba9dac..8ebc6d59aa74 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 5120475b0cd7..1f4c8ec75be8 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef WIL6210_TXRX_H
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 04d576deae72..778b63be6a9a 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index 136c51c338cf..c744c65225da 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2012-2016,2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef WIL6210_TXRX_EDMA_H
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 0783c7963621..97626bfd4dac 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __WIL6210_H__
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index 772cb00c2002..1332eb8c831f 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c
index 4eed05bddb60..10e10dc9fedf 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.c
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*/
#include <linux/device.h>
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index bca090611477..5ff66202238d 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __WIL_PLATFORM_H__
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 153b84447e40..7a0d934eb271 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/moduleparam.h>
@@ -2505,7 +2494,8 @@ int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie)
cmd->mgmt_frm_type = type;
/* BUG: FW API define ieLen as u8. Will fix FW */
cmd->ie_len = cpu_to_le16(ie_len);
- memcpy(cmd->ie_info, ie, ie_len);
+ if (ie_len)
+ memcpy(cmd->ie_info, ie, ie_len);
rc = wmi_send(wil, WMI_SET_APPIE_CMDID, vif->mid, cmd, len);
kfree(cmd);
out:
@@ -2541,7 +2531,8 @@ int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie)
}
cmd->ie_len = cpu_to_le16(ie_len);
- memcpy(cmd->ie_info, ie, ie_len);
+ if (ie_len)
+ memcpy(cmd->ie_info, ie, ie_len);
rc = wmi_send(wil, WMI_UPDATE_FT_IES_CMDID, vif->mid, cmd, len);
kfree(cmd);
@@ -2715,7 +2706,7 @@ int wmi_get_all_temperatures(struct wil6210_priv *wil,
return rc;
if (reply.evt.status == WMI_FW_STATUS_FAILURE) {
- wil_err(wil, "Failed geting TEMP_SENSE_ALL\n");
+ wil_err(wil, "Failed getting TEMP_SENSE_ALL\n");
return -EINVAL;
}
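The two `if (ie_len)` guards added above matter because these WMI paths can be called with a NULL information element and a zero length; the C standard makes memcpy() with an invalid (including null) source pointer undefined behavior even when the count is zero, which sanitizers flag. A small standalone sketch of the guard pattern:

#include <stdio.h>
#include <string.h>

/* copy an optional information element into a command buffer;
 * ie may be NULL when ie_len == 0, so guard the memcpy() --
 * memcpy(dst, NULL, 0) is undefined behavior per the C standard */
static void fill_cmd(unsigned char *dst, const void *ie, size_t ie_len)
{
	if (ie_len)
		memcpy(dst, ie, ie_len);
}

int main(void)
{
	unsigned char buf[8] = { 0 };

	fill_cmd(buf, NULL, 0);		/* safe: no-op */
	fill_cmd(buf, "\x01\x02", 2);	/* normal copy */
	printf("%d %d\n", buf[0], buf[1]);
	return 0;
}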
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index a2f7034489ae..6bd4ccee28ab 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2006-2012 Wilocity
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
diff --git a/drivers/net/wireless/atmel/atmel_cs.c b/drivers/net/wireless/atmel/atmel_cs.c
index 7afc9c5329fb..368eebefa741 100644
--- a/drivers/net/wireless/atmel/atmel_cs.c
+++ b/drivers/net/wireless/atmel/atmel_cs.c
@@ -117,11 +117,9 @@ static int atmel_config_check(struct pcmcia_device *p_dev, void *priv_data)
static int atmel_config(struct pcmcia_device *link)
{
- struct local_info *dev;
int ret;
const struct pcmcia_device_id *did;
- dev = link->priv;
did = dev_get_drvdata(&link->dev);
dev_dbg(&link->dev, "atmel_config\n");
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 31bf71a80c26..9733c64bf978 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1400,7 +1400,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
/* This TX ring is full. */
unsigned int skb_mapping = skb_get_queue_mapping(skb);
ieee80211_stop_queue(dev->wl->hw, skb_mapping);
- dev->wl->tx_queue_stopped[skb_mapping] = 1;
+ dev->wl->tx_queue_stopped[skb_mapping] = true;
ring->stopped = true;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
@@ -1566,7 +1566,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
- dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
+ dev->wl->tx_queue_stopped[ring->queue_prio] = false;
} else {
/* If the driver queue is running wake the corresponding
* mac80211 queue. */
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index b85603e91c7a..39da1a4c30ac 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -3600,7 +3600,7 @@ static void b43_tx_work(struct work_struct *work)
else
err = b43_dma_tx(dev, skb);
if (err == -ENOSPC) {
- wl->tx_queue_stopped[queue_num] = 1;
+ wl->tx_queue_stopped[queue_num] = true;
ieee80211_stop_queue(wl->hw, queue_num);
skb_queue_head(&wl->tx_queue[queue_num], skb);
break;
@@ -3611,7 +3611,7 @@ static void b43_tx_work(struct work_struct *work)
}
if (!err)
- wl->tx_queue_stopped[queue_num] = 0;
+ wl->tx_queue_stopped[queue_num] = false;
}
#if B43_DEBUG
@@ -5603,7 +5603,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
/* Initialize queues and flags. */
for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
skb_queue_head_init(&wl->tx_queue[queue_num]);
- wl->tx_queue_stopped[queue_num] = 0;
+ wl->tx_queue_stopped[queue_num] = false;
}
snprintf(chip_name, ARRAY_SIZE(chip_name),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index fc12598b2dd3..96fd8e2bf773 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1108,7 +1108,8 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
struct sdio_func *func;
struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
- mmc_pm_flag_t sdio_flags;
+ mmc_pm_flag_t pm_caps, sdio_flags;
+ int ret = 0;
func = container_of(dev, struct sdio_func, dev);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
@@ -1119,19 +1120,33 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
bus_if = dev_get_drvdata(dev);
sdiodev = bus_if->bus_priv.sdio;
- brcmf_sdiod_freezer_on(sdiodev);
- brcmf_sdio_wd_timer(sdiodev->bus, 0);
+ pm_caps = sdio_get_host_pm_caps(func);
+
+ if (pm_caps & MMC_PM_KEEP_POWER) {
+ /* preserve card power during suspend */
+ brcmf_sdiod_freezer_on(sdiodev);
+ brcmf_sdio_wd_timer(sdiodev->bus, 0);
+
+ sdio_flags = MMC_PM_KEEP_POWER;
+ if (sdiodev->wowl_enabled) {
+ if (sdiodev->settings->bus.sdio.oob_irq_supported)
+ enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+ else
+ sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ }
+
+ if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
+ brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
- sdio_flags = MMC_PM_KEEP_POWER;
- if (sdiodev->wowl_enabled) {
- if (sdiodev->settings->bus.sdio.oob_irq_supported)
- enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
- else
- sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ } else {
+ /* power will be cut so remove device, probe again in resume */
+ brcmf_sdiod_intr_unregister(sdiodev);
+ ret = brcmf_sdiod_remove(sdiodev);
+ if (ret)
+ brcmf_err("Failed to remove device on suspend\n");
}
- if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
- brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
- return 0;
+
+ return ret;
}
static int brcmf_ops_sdio_resume(struct device *dev)
@@ -1139,13 +1154,23 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct sdio_func *func = container_of(dev, struct sdio_func, dev);
+ mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(func);
+ int ret = 0;
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
if (func->num != 2)
return 0;
- brcmf_sdiod_freezer_off(sdiodev);
- return 0;
+ if (!(pm_caps & MMC_PM_KEEP_POWER)) {
+ /* bus was powered off and device removed, probe again */
+ ret = brcmf_sdiod_probe(sdiodev);
+ if (ret)
+ brcmf_err("Failed to probe device on resume\n");
+ } else {
+ brcmf_sdiod_freezer_off(sdiodev);
+ }
+
+ return ret;
}
static const struct dev_pm_ops brcmf_sdio_pm_ops = {
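The rewritten suspend/resume pair above keys everything off whether the SDIO host can keep the card powered across suspend (MMC_PM_KEEP_POWER): if it can, the driver freezes, optionally arms an out-of-band or in-band wake source, and requests the pm flags; if it cannot, power will be cut, so the device is removed now and probed again on resume. A hedged sketch of that decision logic, with stand-in flag names rather than the MMC API:

#include <stdbool.h>
#include <stdio.h>

/* stand-in bits mirroring MMC_PM_KEEP_POWER / MMC_PM_WAKE_SDIO_IRQ */
#define PM_KEEP_POWER		0x1
#define PM_WAKE_SDIO_IRQ	0x2

/* returns the pm flags to request, or 0 when the card loses power and
 * the caller must remove the device and re-probe it on resume */
static unsigned int plan_suspend(unsigned int host_caps, bool wowl,
				 bool oob_irq)
{
	unsigned int flags;

	if (!(host_caps & PM_KEEP_POWER))
		return 0;	/* power will be cut: remove the device */

	flags = PM_KEEP_POWER;
	if (wowl && !oob_irq)
		flags |= PM_WAKE_SDIO_IRQ;	/* in-band wake via SDIO IRQ */
	return flags;
}

int main(void)
{
	printf("%#x\n", plan_suspend(PM_KEEP_POWER, true, false)); /* 0x3 */
	printf("%#x\n", plan_suspend(0, true, false));             /* 0 */
	return 0;
}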
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index e3ebb7abbdae..5598bbd09b62 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1282,6 +1282,31 @@ static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
return err;
}
+static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data,
+ u16 pwd_len)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_wsec_sae_pwd_le sae_pwd;
+ int err;
+
+ if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
+ bphy_err(drvr, "sae_password must be less than %d\n",
+ BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
+ return -EINVAL;
+ }
+
+ sae_pwd.key_len = cpu_to_le16(pwd_len);
+ memcpy(sae_pwd.key, pwd_data, pwd_len);
+
+ err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
+ sizeof(sae_pwd));
+ if (err < 0)
+ bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
+ pwd_len);
+
+ return err;
+}
+
static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy);
@@ -1505,6 +1530,8 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+ else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_3)
+ val = WPA3_AUTH_SAE_PSK;
else
val = WPA_AUTH_DISABLED;
brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
@@ -1537,6 +1564,10 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
val = 1;
brcmf_dbg(CONN, "shared key\n");
break;
+ case NL80211_AUTHTYPE_SAE:
+ val = 3;
+ brcmf_dbg(CONN, "SAE authentication\n");
+ break;
default:
val = 2;
brcmf_dbg(CONN, "automatic, auth type (%d)\n", sme->auth_type);
@@ -1647,6 +1678,7 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
u16 count;
profile->use_fwsup = BRCMF_PROFILE_FWSUP_NONE;
+ profile->is_ft = false;
if (!sme->crypto.n_akm_suites)
return 0;
@@ -1691,11 +1723,23 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
break;
case WLAN_AKM_SUITE_FT_8021X:
val = WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT;
+ profile->is_ft = true;
if (sme->want_1x)
profile->use_fwsup = BRCMF_PROFILE_FWSUP_1X;
break;
case WLAN_AKM_SUITE_FT_PSK:
val = WPA2_AUTH_PSK | WPA2_AUTH_FT;
+ profile->is_ft = true;
+ break;
+ default:
+ bphy_err(drvr, "invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
+ return -EINVAL;
+ }
+ } else if (val & WPA3_AUTH_SAE_PSK) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_SAE:
+ val = WPA3_AUTH_SAE_PSK;
break;
default:
bphy_err(drvr, "invalid cipher group (%d)\n",
@@ -1773,7 +1817,8 @@ brcmf_set_sharedkey(struct net_device *ndev,
brcmf_dbg(CONN, "wpa_versions 0x%x cipher_pairwise 0x%x\n",
sec->wpa_versions, sec->cipher_pairwise);
- if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
+ if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2 |
+ NL80211_WPA_VERSION_3))
return 0;
if (!(sec->cipher_pairwise &
@@ -1980,7 +2025,13 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
- if (sme->crypto.psk) {
+ if (sme->crypto.sae_pwd) {
+ brcmf_dbg(INFO, "using SAE offload\n");
+ profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
+ }
+
+ if (sme->crypto.psk &&
+ profile->use_fwsup != BRCMF_PROFILE_FWSUP_SAE) {
if (WARN_ON(profile->use_fwsup != BRCMF_PROFILE_FWSUP_NONE)) {
err = -EINVAL;
goto done;
@@ -1998,12 +2049,23 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
}
}
- if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_PSK) {
+ if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_PSK)
err = brcmf_set_pmk(ifp, sme->crypto.psk,
BRCMF_WSEC_MAX_PSK_LEN);
- if (err)
+ else if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_SAE) {
+ /* clean up user-space RSNE */
+ if (brcmf_fil_iovar_data_set(ifp, "wpaie", NULL, 0)) {
+ bphy_err(drvr, "failed to clean up user-space RSNE\n");
goto done;
+ }
+ err = brcmf_set_sae_password(ifp, sme->crypto.sae_pwd,
+ sme->crypto.sae_pwd_len);
+ if (!err && sme->crypto.psk)
+ err = brcmf_set_pmk(ifp, sme->crypto.psk,
+ BRCMF_WSEC_MAX_PSK_LEN);
}
+ if (err)
+ goto done;
/* Join with specific BSSID and cached SSID
* If SSID is zero join based on BSSID only
@@ -5359,7 +5421,8 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_vif *vif,
if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
brcmf_dbg(CONN, "Processing set ssid\n");
memcpy(vif->profile.bssid, e->addr, ETH_ALEN);
- if (vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_PSK)
+ if (vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_PSK &&
+ vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_SAE)
return true;
set_bit(BRCMF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state);
@@ -5554,6 +5617,11 @@ done:
cfg80211_roamed(ndev, &roam_info, GFP_KERNEL);
brcmf_dbg(CONN, "Report roaming result\n");
+ if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X && profile->is_ft) {
+ cfg80211_port_authorized(ndev, profile->bssid, GFP_KERNEL);
+ brcmf_dbg(CONN, "Report port authorized\n");
+ }
+
set_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
brcmf_dbg(TRACE, "Exit\n");
return err;
@@ -6664,6 +6732,9 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK);
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X);
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SAE))
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_SAE_OFFLOAD);
}
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
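Taken together, the cfg80211.c hunks above wire up SAE offload: when the connect request carries crypto.sae_pwd, the driver selects the firmware SAE supplicant, clears any user-space RSNE, pushes the password through the "sae_password" iovar, and sets a PMK afterwards if one was also supplied. A simplified sketch of just the mode selection (the real code also guards with WARN_ON and firmware feature checks):

#include <stdbool.h>
#include <stdio.h>

/* mirrors enum brcmf_profile_fwsup from cfg80211.h above */
enum fwsup { FWSUP_NONE, FWSUP_PSK, FWSUP_1X, FWSUP_SAE };

/* sae_pwd takes precedence: a request carrying both an SAE password
 * and a PSK still offloads SAE, with the PSK applied afterwards */
static enum fwsup pick_fwsup(bool has_sae_pwd, bool has_psk, bool want_1x)
{
	if (has_sae_pwd)
		return FWSUP_SAE;
	if (has_psk)
		return FWSUP_PSK;
	if (want_1x)
		return FWSUP_1X;
	return FWSUP_NONE;
}

int main(void)
{
	printf("%d\n", pick_fwsup(true, true, false));	/* 3 = FWSUP_SAE */
	printf("%d\n", pick_fwsup(false, true, false));	/* 1 = FWSUP_PSK */
	return 0;
}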
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 14d5bbad1db1..6ce48f6275a4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -107,7 +107,8 @@ struct brcmf_cfg80211_security {
enum brcmf_profile_fwsup {
BRCMF_PROFILE_FWSUP_NONE,
BRCMF_PROFILE_FWSUP_PSK,
- BRCMF_PROFILE_FWSUP_1X
+ BRCMF_PROFILE_FWSUP_1X,
+ BRCMF_PROFILE_FWSUP_SAE
};
/**
@@ -122,6 +123,7 @@ struct brcmf_cfg80211_profile {
struct brcmf_cfg80211_security sec;
struct brcmf_wsec_key key[BRCMF_MAX_DEFAULT_KEYS];
enum brcmf_profile_fwsup use_fwsup;
+ bool is_ft;
};
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 2c3526aeca6f..1c9c74cc958e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -39,7 +39,8 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_P2P, "p2p" },
{ BRCMF_FEAT_MONITOR, "monitor" },
{ BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
- { BRCMF_FEAT_DOT11H, "802.11h" }
+ { BRCMF_FEAT_DOT11H, "802.11h" },
+ { BRCMF_FEAT_SAE, "sae" },
};
#ifdef DEBUG
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index 736a8179f62f..280a1f6412d4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -26,6 +26,7 @@
* MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header
* MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header
* DOT11H: firmware supports 802.11h
+ * SAE: simultaneous authentication of equals
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -45,7 +46,8 @@
BRCMF_FEAT_DEF(MONITOR) \
BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \
BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \
- BRCMF_FEAT_DEF(DOT11H)
+ BRCMF_FEAT_DEF(DOT11H) \
+ BRCMF_FEAT_DEF(SAE)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 37c512036e0e..de0ef1b545c4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -61,6 +61,8 @@
#define BRCMF_WSEC_MAX_PSK_LEN 32
#define BRCMF_WSEC_PASSPHRASE BIT(0)
+#define BRCMF_WSEC_MAX_SAE_PASSWORD_LEN 128
+
/* primary (ie tx) key */
#define BRCMF_PRIMARY_KEY (1 << 1)
#define DOT11_BSSTYPE_ANY 2
@@ -518,6 +520,17 @@ struct brcmf_wsec_pmk_le {
u8 key[2 * BRCMF_WSEC_MAX_PSK_LEN + 1];
};
+/**
+ * struct brcmf_wsec_sae_pwd_le - firmware SAE password material.
+ *
+ * @key_len: number of octets in key materials.
+ * @key: SAE password material.
+ */
+struct brcmf_wsec_sae_pwd_le {
+ __le16 key_len;
+ u8 key[BRCMF_WSEC_MAX_SAE_PASSWORD_LEN];
+};
+
/* Used to get specific STA parameters */
struct brcmf_scb_val_le {
__le32 val;
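struct brcmf_wsec_sae_pwd_le above is a little-endian wire structure: a __le16 octet count followed by up to BRCMF_WSEC_MAX_SAE_PASSWORD_LEN bytes of password material, filled exactly as brcmf_set_sae_password() does with cpu_to_le16() plus memcpy(). A portable userspace sketch of the same packing, with htole16() from <endian.h> standing in for cpu_to_le16():

#include <endian.h>	/* htole16(): glibc/musl extension */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_SAE_PASSWORD_LEN 128	/* BRCMF_WSEC_MAX_SAE_PASSWORD_LEN */

struct sae_pwd_le {		/* mirrors brcmf_wsec_sae_pwd_le */
	uint16_t key_len;	/* little-endian on the wire */
	uint8_t key[MAX_SAE_PASSWORD_LEN];
};

static int pack_sae_pwd(struct sae_pwd_le *out, const void *pwd, size_t len)
{
	if (len > MAX_SAE_PASSWORD_LEN)
		return -1;	/* matches the driver's -EINVAL check */
	out->key_len = htole16((uint16_t)len);
	memcpy(out->key, pwd, len);
	return 0;
}

int main(void)
{
	struct sae_pwd_le buf = { 0 };

	if (pack_sae_pwd(&buf, "hunter2", 7) == 0)
		printf("key_len bytes: %02x %02x\n",
		       (unsigned)((uint8_t *)&buf)[0],
		       (unsigned)((uint8_t *)&buf)[1]);
	return 0;
}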
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 6c463475e90b..3184dab41a5e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1024,8 +1024,6 @@ brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
- memset(ring, 0, size);
-
return (ring);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index 14e530601ef3..fabfbb0b40b0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -57,6 +57,10 @@ static int brcmf_pno_remove_request(struct brcmf_pno_info *pi, u64 reqid)
mutex_lock(&pi->req_lock);
+ /* Nothing to do if we have no requests */
+ if (pi->n_reqs == 0)
+ goto done;
+
/* find request */
for (i = 0; i < pi->n_reqs; i++) {
if (pi->reqs[i]->reqid == reqid)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
index db783e94f929..5a6d9c86552a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
@@ -496,13 +496,11 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
* table and override CDD later
*/
if (li_mimo == &locale_bn) {
- if (li_mimo == &locale_bn) {
- maxpwr20 = QDB(16);
- maxpwr40 = 0;
+ maxpwr20 = QDB(16);
+ maxpwr40 = 0;
- if (chan >= 3 && chan <= 11)
- maxpwr40 = QDB(16);
- }
+ if (chan >= 3 && chan <= 11)
+ maxpwr40 = QDB(16);
for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
txpwr->mcs_20_siso[i] = (u8) maxpwr20;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 080e829da9b3..3f09d89ba922 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -838,9 +838,8 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
struct dma_pub *dma = NULL;
struct d11txh *txh = NULL;
struct scb *scb = NULL;
- bool free_pdu;
- int tx_rts, tx_frame_count, tx_rts_count;
- uint totlen, supr_status;
+ int tx_frame_count;
+ uint supr_status;
bool lastframe;
struct ieee80211_hdr *h;
u16 mcl;
@@ -917,11 +916,8 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
CHSPEC_CHANNEL(wlc->default_bss->chanspec));
}
- tx_rts = le16_to_cpu(txh->MacTxControlLow) & TXC_SENDRTS;
tx_frame_count =
(txs->status & TX_STATUS_FRM_RTX_MASK) >> TX_STATUS_FRM_RTX_SHIFT;
- tx_rts_count =
- (txs->status & TX_STATUS_RTS_RTX_MASK) >> TX_STATUS_RTS_RTX_SHIFT;
lastframe = !ieee80211_has_morefrags(h->frame_control);
@@ -989,9 +985,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
- totlen = p->len;
- free_pdu = true;
-
if (lastframe) {
/* remove PLCP & Broadcom tx descriptor header */
skb_pull(p, D11_PHY_HDR_LEN);
@@ -1816,8 +1809,7 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
udelay(2);
brcms_b_core_phy_clk(wlc_hw, ON);
- if (pih)
- wlc_phy_anacore(pih, ON);
+ wlc_phy_anacore(pih, ON);
}
/* switch to and initialize new band */
@@ -7384,9 +7376,7 @@ static void brcms_c_update_beacon_hw(struct brcms_c_info *wlc,
false, true);
/* mark beacon0 valid */
bcma_set32(core, D11REGOFFS(maccommand), MCMD_BCN1VLD);
- return;
}
- return;
}
/*
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
index 7b31c212694d..7552bdb91991 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
@@ -231,6 +231,8 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec)
#define WPA2_AUTH_FT 0x4000 /* Fast BSS Transition */
#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */
+#define WPA3_AUTH_SAE_PSK 0x40000 /* SAE with 4-way handshake */
+
#define DOT11_DEFAULT_RTS_LEN 2347
#define DOT11_DEFAULT_FRAG_LEN 2346
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
index 34cfd8162855..0cb36d1b983a 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
@@ -999,13 +999,12 @@ static int libipw_read_qos_info_element(struct
/*
* Write QoS parameters from the ac parameters.
*/
-static int libipw_qos_convert_ac_to_parameters(struct
+static void libipw_qos_convert_ac_to_parameters(struct
libipw_qos_parameter_info
*param_elm, struct
libipw_qos_parameters
*qos_param)
{
- int rc = 0;
int i;
struct libipw_qos_ac_parameter *ac_params;
u32 txop;
@@ -1030,7 +1029,6 @@ static int libipw_qos_convert_ac_to_parameters(struct
txop = le16_to_cpu(ac_params->tx_op_limit) * 32;
qos_param->tx_op_limit[i] = cpu_to_le16(txop);
}
- return rc;
}
/*
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 4fbcc7fba3cc..1168055da182 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -2301,9 +2301,7 @@ __il3945_down(struct il_priv *il)
il3945_hw_txq_ctx_free(il);
exit:
memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
-
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
+ dev_kfree_skb(il->beacon_skb);
il->beacon_skb = NULL;
/* clear out any free frames */
@@ -3846,9 +3844,7 @@ il3945_pci_remove(struct pci_dev *pdev)
il_free_channel_map(il);
il_free_geos(il);
kfree(il->scan_cmd);
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
-
+ dev_kfree_skb(il->beacon_skb);
ieee80211_free_hw(il->hw);
}
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 51fdd7ce30af..3664f56f8cbd 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -3331,7 +3331,6 @@ il4965_set_tkip_dynamic_key_info(struct il_priv *il,
struct ieee80211_key_conf *keyconf, u8 sta_id)
{
unsigned long flags;
- int ret = 0;
__le16 key_flags = 0;
key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
@@ -3368,7 +3367,7 @@ il4965_set_tkip_dynamic_key_info(struct il_priv *il,
spin_unlock_irqrestore(&il->sta_lock, flags);
- return ret;
+ return 0;
}
void
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 73f7bbf742bc..d966b29b45ee 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -1072,7 +1072,7 @@ EXPORT_SYMBOL(il_get_channel_info);
static void
il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
{
- const __le32 interval[3][IL_POWER_VEC_SIZE] = {
+ static const __le32 interval[3][IL_POWER_VEC_SIZE] = {
SLP_VEC(2, 2, 4, 6, 0xFF),
SLP_VEC(2, 4, 7, 10, 10),
SLP_VEC(4, 7, 10, 10, 0xFF)
@@ -5182,8 +5182,7 @@ il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
/* new association get rid of ibss beacon skb */
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
+ dev_kfree_skb(il->beacon_skb);
il->beacon_skb = NULL;
il->timestamp = 0;
@@ -5302,10 +5301,7 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
spin_lock_irqsave(&il->lock, flags);
-
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
-
+ dev_kfree_skb(il->beacon_skb);
il->beacon_skb = skb;
timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
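The `static const` change to the interval table in il_build_powertable_cmd() means the three SLP_VEC rows live once in .rodata instead of being rebuilt on the stack at every call; the behavior is identical because the table is never modified. The difference in a standalone sketch:

#include <stdio.h>

#define VEC_SIZE 5

static unsigned int with_static(void)
{
	/* one copy in .rodata, no per-call initialization */
	static const unsigned int interval[VEC_SIZE] = { 2, 2, 4, 6, 0xFF };
	return interval[4];
}

static unsigned int without_static(void)
{
	/* a fresh stack copy is initialized on every call */
	const unsigned int interval[VEC_SIZE] = { 2, 2, 4, 6, 0xFF };
	return interval[4];
}

int main(void)
{
	printf("%u %u\n", with_static(), without_static());
	return 0;
}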
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index ff41987a7e35..0aae3fa4128c 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -14,7 +14,8 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
-iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
+iwlwifi-objs += fw/dbg.o
+iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 5e355c4957df..8ad771dadae2 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -54,9 +54,10 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
+#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 50
+#define IWL_22000_UCODE_API_MAX 51
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -137,7 +138,7 @@ static const struct iwl_base_params iwl_22000_base_params = {
.pcie_l1_allowed = true,
};
-static const struct iwl_base_params iwl_22560_base_params = {
+static const struct iwl_base_params iwl_ax210_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
@@ -183,33 +184,57 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.min_umac_error_event_table = 0x400000, \
.d3_debug_data_base_addr = 0x401000, \
.d3_debug_data_length = 60 * 1024, \
- .fw_mon_smem_write_ptr_addr = 0xa0c16c, \
- .fw_mon_smem_write_ptr_msk = 0xfffff, \
- .fw_mon_smem_cycle_cnt_ptr_addr = 0xa0c174, \
- .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff
+ .mon_smem_regs = { \
+ .write_ptr = { \
+ .addr = LDBG_M2S_BUF_WPTR, \
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
+ }, \
+ .cycle_cnt = { \
+ .addr = LDBG_M2S_BUF_WRAP_CNT, \
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
+ }, \
+ }
#define IWL_DEVICE_22500 \
IWL_DEVICE_22000_COMMON, \
.trans.device_family = IWL_DEVICE_FAMILY_22000, \
.trans.base_params = &iwl_22000_base_params, \
.trans.csr = &iwl_csr_v1, \
- .gp2_reg_addr = 0xa02c68
-
-#define IWL_DEVICE_22560 \
- IWL_DEVICE_22000_COMMON, \
- .trans.device_family = IWL_DEVICE_FAMILY_22560, \
- .trans.base_params = &iwl_22560_base_params, \
- .trans.csr = &iwl_csr_v2
+ .gp2_reg_addr = 0xa02c68, \
+ .mon_dram_regs = { \
+ .write_ptr = { \
+ .addr = MON_BUFF_WRPTR_VER2, \
+ .mask = 0xffffffff, \
+ }, \
+ .cycle_cnt = { \
+ .addr = MON_BUFF_CYCLE_CNT_VER2, \
+ .mask = 0xffffffff, \
+ }, \
+ }
#define IWL_DEVICE_AX210 \
IWL_DEVICE_22000_COMMON, \
.trans.umac_prph_offset = 0x300000, \
.trans.device_family = IWL_DEVICE_FAMILY_AX210, \
- .trans.base_params = &iwl_22560_base_params, \
+ .trans.base_params = &iwl_ax210_base_params, \
.trans.csr = &iwl_csr_v1, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
- .min_256_ba_txq_size = 512
+ .min_256_ba_txq_size = 512, \
+ .mon_dram_regs = { \
+ .write_ptr = { \
+ .addr = DBGC_CUR_DBGBUF_STATUS, \
+ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
+ }, \
+ .cycle_cnt = { \
+ .addr = DBGC_DBGBUF_WRAP_AROUND, \
+ .mask = 0xffffffff, \
+ }, \
+ .cur_frag = { \
+ .addr = DBGC_CUR_DBGBUF_STATUS, \
+ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
+ }, \
+ }
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC 22000",
@@ -292,39 +317,39 @@ const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
};
const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
- .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
+ .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
- .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
+ .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
- .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
+ .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax200_cfg_cc = {
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e8372b67df03..e9155b9b5ee4 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -55,6 +55,7 @@
#include <linux/stringify.h>
#include "iwl-config.h"
#include "fw/file.h"
+#include "iwl-prph.h"
/* Highest firmware API version supported */
#define IWL9000_UCODE_API_MAX 46
@@ -149,10 +150,26 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.ht_params = &iwl9000_ht_params, \
.nvm_ver = IWL9000_NVM_VERSION, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .fw_mon_smem_write_ptr_addr = 0xa0476c, \
- .fw_mon_smem_write_ptr_msk = 0xfffff, \
- .fw_mon_smem_cycle_cnt_ptr_addr = 0xa04774, \
- .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff
+ .mon_smem_regs = { \
+ .write_ptr = { \
+ .addr = LDBG_M2S_BUF_WPTR, \
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
+ }, \
+ .cycle_cnt = { \
+ .addr = LDBG_M2S_BUF_WRAP_CNT, \
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
+ }, \
+ }, \
+ .mon_dram_regs = { \
+ .write_ptr = { \
+ .addr = MON_BUFF_WRPTR_VER2, \
+ .mask = 0xffffffff, \
+ }, \
+ .cycle_cnt = { \
+ .addr = MON_BUFF_CYCLE_CNT_VER2, \
+ .mask = 0xffffffff, \
+ }, \
+ }
const struct iwl_cfg iwl9160_2ac_cfg = {
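Both the 9000 and 22000 hunks replace four flat fw_mon_* fields with nested {addr, mask} register descriptors (mon_smem_regs / mon_dram_regs), so the dump code can read any monitor pointer generically instead of special-casing each device family. A tiny sketch of the addr/mask descriptor pattern, with array indices standing in for MMIO offsets:

#include <stdint.h>
#include <stdio.h>

/* addr/mask register descriptor, like the mon_smem_regs entries */
struct reg_desc {
	uint32_t addr;
	uint32_t mask;
};

/* read a field through its descriptor: fetch the word, mask the bits */
static uint32_t read_field(const uint32_t *mmio, struct reg_desc d)
{
	return mmio[d.addr] & d.mask;
}

int main(void)
{
	uint32_t fake_mmio[4] = { 0, 0xdeadbeef, 0, 0 };
	struct reg_desc write_ptr = { .addr = 1, .mask = 0xfffff };

	printf("%#x\n", read_field(fake_mmio, write_ptr)); /* 0xdbeef */
	return 0;
}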
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index c2db758b9d54..40fe2d667622 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -61,6 +61,7 @@
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
+#include "fw/runtime.h"
void *iwl_acpi_get_object(struct device *dev, acpi_string method)
{
@@ -245,3 +246,289 @@ out_free:
return ret;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
+
+int iwl_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled)
+{
+ int i;
+
+ profile->enabled = enabled;
+
+ for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) {
+ if (table[i].type != ACPI_TYPE_INTEGER ||
+ table[i].integer.value > U8_MAX)
+ return -EINVAL;
+
+ profile->table[i] = table[i].integer.value;
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_set_profile);
+
+int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
+ int prof_a, int prof_b)
+{
+ int i, j, idx;
+ int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
+
+ BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
+ BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
+ ACPI_SAR_TABLE_SIZE);
+
+ for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
+ struct iwl_sar_profile *prof;
+
+ /* don't allow SAR to be disabled (profile 0 means disable) */
+ if (profs[i] == 0)
+ return -EPERM;
+
+ /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
+ if (profs[i] > ACPI_SAR_PROFILE_NUM)
+ return -EINVAL;
+
+ /* profiles go from 1 to 4, so decrement to access the array */
+ prof = &fwrt->sar_profiles[profs[i] - 1];
+
+ /* if the profile is disabled, do nothing */
+ if (!prof->enabled) {
+ IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
+ profs[i]);
+ /* if one of the profiles is disabled, we fail all */
+ return -ENOENT;
+ }
+ IWL_DEBUG_INFO(fwrt,
+ "SAR EWRD: chain %d profile index %d\n",
+ i, profs[i]);
+ IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
+ for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
+ idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
+ per_chain_restriction[i][j] =
+ cpu_to_le16(prof->table[idx]);
+ IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
+ j, prof->table[idx]);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
+
+int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *table, *data;
+ bool enabled;
+ int ret, tbl_rev;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!(wifi_pkg->package.elements[1].integer.value);
+
+ /* position of the actual table */
+ table = &wifi_pkg->package.elements[2];
+
+ /* The profile from WRDS is officially profile 1, but goes
+ * into sar_profiles[0] (because we don't have a profile 0).
+ */
+ ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled);
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_wrds_table);
+
+int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data;
+ bool enabled;
+ int i, n_profiles, tbl_rev;
+ int ret = 0;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!(wifi_pkg->package.elements[1].integer.value);
+ n_profiles = wifi_pkg->package.elements[2].integer.value;
+
+ /*
+ * Check the validity of n_profiles. The EWRD profiles start
+ * from index 1, so the maximum value allowed here is
+ * ACPI_SAR_PROFILE_NUM - 1.
+ */
+ if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ for (i = 0; i < n_profiles; i++) {
+ /* the tables start at element 3 */
+ int pos = 3;
+
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+ * have profile 0). So in the array we start from 1.
+ */
+ ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos],
+ &fwrt->sar_profiles[i + 1],
+ enabled);
+ if (ret < 0)
+ break;
+
+ /* go to the next table */
+ pos += ACPI_SAR_TABLE_SIZE;
+ }
+
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table);
+
+int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data;
+ int i, j, ret, tbl_rev;
+ int idx = 1;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WGDS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ fwrt->geo_rev = tbl_rev;
+ for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
+ for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
+ union acpi_object *entry;
+
+ entry = &wifi_pkg->package.elements[idx++];
+ if (entry->type != ACPI_TYPE_INTEGER ||
+ entry->integer.value > U8_MAX) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ fwrt->geo_profiles[i].values[j] = entry->integer.value;
+ }
+ }
+ ret = 0;
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table);
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
+{
+ /*
+ * The GEO_TX_POWER_LIMIT command is not supported on earlier
+ * firmware versions. Unfortunately, we don't have a TLV API
+ * flag to rely on, so rely on the major version which is in
+ * the first byte of ucode_ver. This was implemented
+ * initially on version 38 and then backported to 17. It was
+ * also backported to 29, but only for 7265D devices. The
+ * intention was to have it in 36 as well, but not all 8000-family
+ * devices got this feature enabled. The 8000 family is the
+ * only one using version 36, so skip this version entirely.
+ */
+ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
+ IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
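A sketch of the byte extraction IWL_UCODE_SERIAL performs on ucode_ver; which byte the real macro picks is not shown in this patch, so a low-byte mask is assumed here:

/* Illustrative only: extract one byte of a packed 32-bit version word. */
static inline unsigned int version_low_byte(unsigned int ucode_ver)
{
	return ucode_ver & 0xff;
}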
+
+int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_geo_tx_power_profiles_resp *resp;
+ int ret;
+
+ resp = (void *)cmd->resp_pkt->data;
+ ret = le32_to_cpu(resp->profile_idx);
+ if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
+ IWL_WARN(fwrt, "Invalid geographic profile idx (%d)\n", ret);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_validate_sar_geo_profile);
+
+void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset_group *table)
+{
+ int ret, i, j;
+
+ if (!iwl_sar_geo_support(fwrt))
+ return;
+
+ ret = iwl_sar_get_wgds_table(fwrt);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(fwrt,
+ "Geo SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+ /* we don't fail if the table is not available */
+ return;
+ }
+
+ BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
+ ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE);
+
+ BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
+
+ for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
+ struct iwl_per_chain_offset *chain =
+ (struct iwl_per_chain_offset *)&table[i];
+
+ for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
+ u8 *value;
+
+ value = &fwrt->geo_profiles[i].values[j *
+ ACPI_GEO_PER_CHAIN_SIZE];
+ chain[j].max_tx_power = cpu_to_le16(value[0]);
+ chain[j].chain_a = value[1];
+ chain[j].chain_b = value[2];
+ IWL_DEBUG_RADIO(fwrt,
+ "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+ i, j, value[1], value[2], value[0]);
+ }
+ }
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
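Each geo profile stores its per-band values back to back, ACPI_GEO_PER_CHAIN_SIZE of them per band; a sketch of the indexing the loop above relies on (the helper name is illustrative; kernel u8 type assumed):

/* Illustrative only: start of band j's values inside
 * geo_profiles[i].values[]; entry [0] is max_tx_power, [1] chain A,
 * [2] chain B, as consumed above.
 */
static inline const u8 *geo_band_entry(const u8 *values, int band,
				       int per_chain_size)
{
	return values + band * per_chain_size;
}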
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 6cb2d1f5efea..4a6e8262974b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -61,6 +61,12 @@
#define __iwl_fw_acpi__
#include <linux/acpi.h>
+#include "fw/api/commands.h"
+#include "fw/api/power.h"
+#include "fw/api/phy.h"
+#include "fw/img.h"
+#include "iwl-trans.h"
+
#define ACPI_WRDS_METHOD "WRDS"
#define ACPI_EWRD_METHOD "EWRD"
@@ -104,9 +110,21 @@
#define ACPI_PPAG_MIN_HB -16
#define ACPI_PPAG_MAX_HB 40
+struct iwl_sar_profile {
+ bool enabled;
+ u8 table[ACPI_SAR_TABLE_SIZE];
+};
+
+struct iwl_geo_profile {
+ u8 values[ACPI_GEO_TABLE_SIZE];
+};
+
#ifdef CONFIG_ACPI
+struct iwl_fw_runtime;
+
void *iwl_acpi_get_object(struct device *dev, acpi_string method);
+
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size, int *tbl_rev);
@@ -134,6 +152,27 @@ u64 iwl_acpi_get_pwr_limit(struct device *dev);
*/
int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk);
+int iwl_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled);
+
+int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
+ int prof_a, int prof_b);
+
+int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt);
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
+
+int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
+ struct iwl_host_cmd *cmd);
+
+void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset_group *table);
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@@ -164,5 +203,50 @@ static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
return -ENOENT;
}
+static inline int iwl_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
+ int prof_a, int prof_b)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
+{
+ return false;
+}
+
+static inline int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
+ struct iwl_host_cmd *cmd)
+{
+ return -ENOENT;
+}
+
+static inline void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset_group *table)
+{
+}
+
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 4c3219e7beb6..3643b6ba6385 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -65,6 +65,14 @@
#define __iwl_fw_api_d3_h__
/**
+ * enum iwl_d0i3_flags - d0i3 flags
+ * @IWL_D0I3_RESET_REQUIRE: FW requires reset upon resume
+ */
+enum iwl_d0i3_flags {
+ IWL_D0I3_RESET_REQUIRE = BIT(0),
+};
+
+/**
* enum iwl_d3_wakeup_flags - D3 manager wakeup flags
* @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index ba586f148c14..b9d7ed93311c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -60,52 +60,10 @@
#include <linux/bitops.h>
-/**
- * struct iwl_fw_ini_header: Common Header for all debug group TLV's structures
- *
- * @tlv_version: version info
- * @apply_point: &enum iwl_fw_ini_apply_point
- * @data: TLV data followed
- */
-struct iwl_fw_ini_header {
- __le32 tlv_version;
- __le32 apply_point;
- u8 data[];
-} __packed; /* FW_DEBUG_TLV_HEADER_S */
-
-/**
- * struct iwl_fw_ini_allocation_tlv - (IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION)
- * buffer allocation TLV - for debug
- *
- * @iwl_fw_ini_header: header
- * @allocation_id: &enum iwl_fw_ini_allocation_id - to bind allocation and hcmd
- * if needed (DBGC1/DBGC2/SDFX/...)
- * @buffer_location: type of iwl_fw_ini_buffer_location
- * @size: size in bytes
- * @max_fragments: the maximum allowed fragmentation in the desired memory
- * allocation above
- * @min_frag_size: the minimum allowed fragmentation size in bytes
- */
-struct iwl_fw_ini_allocation_tlv {
- struct iwl_fw_ini_header header;
- __le32 allocation_id;
- __le32 buffer_location;
- __le32 size;
- __le32 max_fragments;
- __le32 min_frag_size;
-} __packed; /* FW_DEBUG_TLV_BUFFER_ALLOCATION_TLV_S_VER_1 */
-
-/**
- * enum iwl_fw_ini_dbg_domain - debug domains
- * allows to send host cmd or collect memory region if a given domain is enabled
- *
- * @IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON: the default domain, always on
- * @IWL_FW_INI_DBG_DOMAIN_REPORT_PS: power save domain
- */
-enum iwl_fw_ini_dbg_domain {
- IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON = 0,
- IWL_FW_INI_DBG_DOMAIN_REPORT_PS,
-}; /* FW_DEBUG_TLV_DOMAIN_API_E_VER_1 */
+#define IWL_FW_INI_MAX_REGION_ID 64
+#define IWL_FW_INI_MAX_NAME 32
+#define IWL_FW_INI_MAX_CFG_NAME 64
+#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
/**
* struct iwl_fw_ini_hcmd
@@ -123,279 +81,198 @@ struct iwl_fw_ini_hcmd {
} __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */
/**
- * struct iwl_fw_ini_hcmd_tlv - (IWL_UCODE_TLV_TYPE_HCMD)
- * Generic Host command pass through TLV
- *
- * @header: header
- * @domain: send command only if the specific domain is enabled
- * &enum iwl_fw_ini_dbg_domain
- * @period_msec: period in which the hcmd will be sent to FW. Measured in msec
- * (0 = one time command).
- * @hcmd: a variable length host-command to be sent to apply the configuration.
+ * struct iwl_fw_ini_header - Common Header for all ini debug TLV's structures
+ *
+ * @version: TLV version
+ * @domain: domain of the TLV. One of &enum iwl_fw_ini_dbg_domain
+ * @data: TLV data
*/
-struct iwl_fw_ini_hcmd_tlv {
- struct iwl_fw_ini_header header;
+struct iwl_fw_ini_header {
+ __le32 version;
__le32 domain;
- __le32 period_msec;
- struct iwl_fw_ini_hcmd hcmd;
-} __packed; /* FW_DEBUG_TLV_HCMD_API_S_VER_1 */
+ u8 data[];
+} __packed; /* FW_TLV_DEBUG_HEADER_S_VER_1 */
-#define IWL_FW_INI_MAX_REGION_ID 64
-#define IWL_FW_INI_MAX_NAME 32
+/**
+ * struct iwl_fw_ini_region_dev_addr - Configuration to read device addresses
+ *
+ * @size: size of each memory chunk
+ * @offset: offset to add to the base address of each chunk
+ */
+struct iwl_fw_ini_region_dev_addr {
+ __le32 size;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_DEVICE_ADDR_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_cfg_dhc - defines dhc response to dump.
+ * struct iwl_fw_ini_region_fifos - Configuration to read Tx/Rx fifos
*
- * @id_and_grp: id and group of dhc response.
- * @desc: dhc response descriptor.
+ * @fid: fifo ids array. Used to determine which fifos to collect
+ * @hdr_only: if non-zero, collect only the registers
+ * @offset: offset to add to the register addresses
*/
-struct iwl_fw_ini_region_cfg_dhc {
- __le32 id_and_grp;
- __le32 desc;
-} __packed; /* FW_DEBUG_TLV_REGION_DHC_API_S_VER_1 */
+struct iwl_fw_ini_region_fifos {
+ __le32 fid[2];
+ __le32 hdr_only;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_REGION_FIFOS_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_cfg_internal - meta data of internal memory region
+ * struct iwl_fw_ini_region_err_table - error table region data
+ *
+ * Configuration to read Umac/Lmac error table
*
- * @num_of_range: the amount of ranges in the region
- * @range_data_size: size of the data to read per range, in bytes.
+ * @version: version of the error table
+ * @base_addr: base address of the error table
+ * @size: size of the error table
+ * @offset: offset to add to &base_addr
*/
-struct iwl_fw_ini_region_cfg_internal {
- __le32 num_of_ranges;
- __le32 range_data_size;
-} __packed; /* FW_DEBUG_TLV_REGION_NIC_INTERNAL_RANGES_S */
+struct iwl_fw_ini_region_err_table {
+ __le32 version;
+ __le32 base_addr;
+ __le32 size;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_REGION_ERROR_TABLE_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_cfg_fifos - meta data of fifos region
- *
- * @fid1: fifo id 1 - bitmap of lmac tx/rx fifos to include in the region
- * @fid2: fifo id 2 - bitmap of umac rx fifos to include in the region.
- * It is unused for tx.
- * @num_of_registers: number of prph registers in the region, each register is
- * 4 bytes size.
- * @header_only: none zero value indicates that this region does not include
- * fifo data and includes only the given registers.
+ * struct iwl_fw_ini_region_internal_buffer - internal buffer region data
+ *
+ * Configuration to read internal monitor buffer
+ *
+ * @alloc_id: allocation id, one of &enum iwl_fw_ini_allocation_id
+ * @base_addr: internal buffer base address
+ * @size: internal buffer size
*/
-struct iwl_fw_ini_region_cfg_fifos {
- __le32 fid1;
- __le32 fid2;
- __le32 num_of_registers;
- __le32 header_only;
-} __packed; /* FW_DEBUG_TLV_REGION_FIFOS_S */
+struct iwl_fw_ini_region_internal_buffer {
+ __le32 alloc_id;
+ __le32 base_addr;
+ __le32 size;
+} __packed; /* FW_TLV_DEBUG_REGION_INTERNAL_BUFFER_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_cfg
- *
- * @region_id: ID of this dump configuration
- * @region_type: &enum iwl_fw_ini_region_type
- * @domain: dump this region only if the specific domain is enabled
- * &enum iwl_fw_ini_dbg_domain
- * @name_len: name length
- * @name: file name to use for this region
- * @internal: used in case the region uses internal memory.
- * @allocation_id: For DRAM type field substitutes for allocation_id
- * @fifos: used in case of fifos region.
- * @dhc_desc: dhc response descriptor.
- * @notif_id_and_grp: dump this region only if the specific notification
- * occurred.
- * @offset: offset to use for each memory base address
- * @start_addr: array of addresses.
+ * struct iwl_fw_ini_region_tlv - region TLV
+ *
+ * Configures parameters for region data collection
+ *
+ * @hdr: debug header
+ * @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID
+ * @type: region type. One of &enum iwl_fw_ini_region_type
+ * @name: region name
+ * @dev_addr: device address configuration. Used by
+ * &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC,
+ * &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX,
+ * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR,
+ * &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG
+ * @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and
+ * &IWL_FW_INI_REGION_RXF
+ * @err_table: error table configuration. Used by
+ * IWL_FW_INI_REGION_LMAC_ERROR_TABLE and
+ * IWL_FW_INI_REGION_UMAC_ERROR_TABLE
+ * @internal_buffer: internal monitor buffer configuration. Used by
+ * &IWL_FW_INI_REGION_INTERNAL_BUFFER
+ * @dram_alloc_id: dram allocation id. One of &enum iwl_fw_ini_allocation_id.
+ * Used by &IWL_FW_INI_REGION_DRAM_BUFFER
+ * @tlv_mask: tlv collection mask. Used by &IWL_FW_INI_REGION_TLV
+ * @addrs: array of addresses attached to the end of the region tlv
*/
-struct iwl_fw_ini_region_cfg {
- __le32 region_id;
- __le32 region_type;
- __le32 domain;
- __le32 name_len;
+struct iwl_fw_ini_region_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 id;
+ __le32 type;
u8 name[IWL_FW_INI_MAX_NAME];
union {
- struct iwl_fw_ini_region_cfg_internal internal;
- __le32 allocation_id;
- struct iwl_fw_ini_region_cfg_fifos fifos;
- struct iwl_fw_ini_region_cfg_dhc dhc_desc;
- __le32 notif_id_and_grp;
- }; /* FW_DEBUG_TLV_REGION_EXT_INT_PARAMS_API_U_VER_1 */
- __le32 offset;
- __le32 start_addr[];
-} __packed; /* FW_DEBUG_TLV_REGION_CONFIG_API_S_VER_1 */
+ struct iwl_fw_ini_region_dev_addr dev_addr;
+ struct iwl_fw_ini_region_fifos fifos;
+ struct iwl_fw_ini_region_err_table err_table;
+ struct iwl_fw_ini_region_internal_buffer internal_buffer;
+ __le32 dram_alloc_id;
+ __le32 tlv_mask;
+ }; /* FW_TLV_DEBUG_REGION_CONF_PARAMS_API_U_VER_1 */
+ __le32 addrs[];
+} __packed; /* FW_TLV_DEBUG_REGION_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_tlv - (IWL_UCODE_TLV_TYPE_REGIONS)
- * defines memory regions to dump
+ * struct iwl_fw_ini_debug_info_tlv
+ *
+ * debug configuration name for a specific image
*
- * @header: header
- * @num_regions: how many different region section and IDs are coming next
- * @region_config: list of dump configurations
+ * @hdr: debug header
+ * @image_type: image type
+ * @debug_cfg_name: debug configuration name
*/
-struct iwl_fw_ini_region_tlv {
- struct iwl_fw_ini_header header;
- __le32 num_regions;
- struct iwl_fw_ini_region_cfg region_config[];
-} __packed; /* FW_DEBUG_TLV_REGIONS_API_S_VER_1 */
+struct iwl_fw_ini_debug_info_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 image_type;
+ u8 debug_cfg_name[IWL_FW_INI_MAX_CFG_NAME];
+} __packed; /* FW_TLV_DEBUG_INFO_API_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_allocation_tlv - Allocates DRAM buffers
+ *
+ * @hdr: debug header
+ * @alloc_id: allocation id. One of &enum iwl_fw_ini_allocation_id
+ * @buf_location: buffer location. One of &enum iwl_fw_ini_buffer_location
+ * @req_size: requested buffer size
+ * @max_frags_num: maximum number of fragments
+ * @min_size: minimum buffer size
+ */
+struct iwl_fw_ini_allocation_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 alloc_id;
+ __le32 buf_location;
+ __le32 req_size;
+ __le32 max_frags_num;
+ __le32 min_size;
+} __packed; /* FW_TLV_DEBUG_BUFFER_ALLOCATION_API_S_VER_1 */
/**
- * struct iwl_fw_ini_trigger
- *
- * @trigger_id: &enum iwl_fw_ini_trigger_id
- * @override_trig: determines how apply trigger in case a trigger with the
- * same id is already in use. Using the first 2 bytes:
- * Byte 0: if 0, override trigger configuration, otherwise use the
- * existing configuration.
- * Byte 1: if 0, override trigger regions, otherwise append regions to
- * existing trigger.
+ * struct iwl_fw_ini_trigger_tlv - trigger TLV
+ *
+ * Trigger that, upon firing, determines which regions to collect
+ *
+ * @hdr: debug header
+ * @time_point: time point. One of &enum iwl_fw_ini_time_point
+ * @trigger_reason: trigger reason
+ * @apply_policy: uses &enum iwl_fw_ini_trigger_apply_policy
* @dump_delay: delay from trigger fire to dump, in usec
- * @occurrences: max amount of times to be fired
- * @reserved: to align to FW struct
+ * @occurrences: max trigger fire occurrences allowed
+ * @reserved: unused
* @ignore_consec: ignore consecutive triggers, in usec
- * @force_restart: force FW restart
+ * @reset_fw: if non-zero, will reset and reload the FW
* @multi_dut: initiate debug dump data on several DUTs
- * @trigger_data: generic data to be utilized per trigger
- * @num_regions: number of dump regions defined for this trigger
- * @data: region IDs
+ * @regions_mask: mask of regions to collect
+ * @data: trigger data
*/
-struct iwl_fw_ini_trigger {
- __le32 trigger_id;
- __le32 override_trig;
+struct iwl_fw_ini_trigger_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 time_point;
+ __le32 trigger_reason;
+ __le32 apply_policy;
__le32 dump_delay;
__le32 occurrences;
__le32 reserved;
__le32 ignore_consec;
- __le32 force_restart;
+ __le32 reset_fw;
__le32 multi_dut;
- __le32 trigger_data;
- __le32 num_regions;
+ __le64 regions_mask;
__le32 data[];
-} __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_API_S_VER_1 */
-
-/**
- * struct iwl_fw_ini_trigger_tlv - (IWL_UCODE_TLV_TYPE_TRIGGERS)
- * Triggers that hold memory regions to dump in case a trigger fires
- *
- * @header: header
- * @num_triggers: how many different triggers section and IDs are coming next
- * @trigger_config: list of trigger configurations
- */
-struct iwl_fw_ini_trigger_tlv {
- struct iwl_fw_ini_header header;
- __le32 num_triggers;
- struct iwl_fw_ini_trigger trigger_config[];
-} __packed; /* FW_TLV_DEBUG_TRIGGERS_API_S_VER_1 */
-
-#define IWL_FW_INI_MAX_IMG_NAME_LEN 32
-#define IWL_FW_INI_MAX_DBG_CFG_NAME_LEN 64
+} __packed; /* FW_TLV_DEBUG_TRIGGER_API_S_VER_1 */
/**
- * struct iwl_fw_ini_debug_info_tlv - (IWL_UCODE_TLV_TYPE_DEBUG_INFO)
- *
- * holds image name and debug configuration name
+ * struct iwl_fw_ini_hcmd_tlv - Generic Host command pass through TLV
*
- * @header: header
- * @img_name_len: length of the image name string
- * @img_name: image name string
- * @dbg_cfg_name_len : length of the debug configuration name string
- * @dbg_cfg_name: debug configuration name string
- */
-struct iwl_fw_ini_debug_info_tlv {
- struct iwl_fw_ini_header header;
- __le32 img_name_len;
- u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
- __le32 dbg_cfg_name_len;
- u8 dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
-} __packed; /* FW_DEBUG_TLV_INFO_API_S_VER_1 */
-
-/**
- * enum iwl_fw_ini_trigger_id
- *
- * @IWL_FW_TRIGGER_ID_FW_ASSERT: FW assert
- * @IWL_FW_TRIGGER_ID_FW_HW_ERROR: HW assert
- * @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang
- * @IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER: FW debug notification
- * @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION: FW generic notification
- * @IWL_FW_TRIGGER_ID_USER_TRIGGER: User trigger
- * @IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER: triggers periodically
- * @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY: peer inactivity
- * @IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED: TX latency
- * threshold was crossed
- * @IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED: TX failed
- * @IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER: Deauth initiated by host
- * @IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST: stop GO request
- * @IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST: start GO request
- * @IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST: join P2P group request
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_START: scan started event
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED: undefined
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS: undefined
- * @IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG: undefined
- * @IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED: BAR frame was received
- * @IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED: agg TX failed
- * @IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED: EAPOL TX failed
- * @IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED: suspicious TX response
- * @IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT: received suspicious auth
- * @IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE: roaming was completed
- * @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED: fast assoc failed
- * @IWL_FW_TRIGGER_ID_HOST_D3_START: D3 start
- * @IWL_FW_TRIGGER_ID_HOST_D3_END: D3 end
- * @IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS: missed beacon events
- * @IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS: P2P missed beacon events
- * @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES: undefined
- * @IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED: undefined
- * @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED: authentication / association
- * failed
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE: scan complete event
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT: scan abort complete
- * @IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE: nic alive message was received
- * @IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE: CSA was completed
- * @IWL_FW_TRIGGER_ID_NUM: number of trigger IDs
+ * @hdr: debug header
+ * @time_point: time point. One of &enum iwl_fw_ini_time_point
+ * @period_msec: interval at which the hcmd will be sent to the FW.
+ * Measured in msec (0 = one time command)
+ * @hcmd: a variable length host-command to be sent to apply the configuration
*/
-enum iwl_fw_ini_trigger_id {
- IWL_FW_TRIGGER_ID_INVALID = 0,
-
- /* Errors triggers */
- IWL_FW_TRIGGER_ID_FW_ASSERT = 1,
- IWL_FW_TRIGGER_ID_FW_HW_ERROR = 2,
- IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG = 3,
-
- /* FW triggers */
- IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER = 4,
- IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION = 5,
-
- /* User trigger */
- IWL_FW_TRIGGER_ID_USER_TRIGGER = 6,
-
- /* periodic uses the data field for the interval time */
- IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER = 7,
-
- /* Host triggers */
- IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 8,
- IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 9,
- IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 10,
- IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 11,
- IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 12,
- IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 13,
- IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 14,
- IWL_FW_TRIGGER_ID_HOST_SCAN_START = 15,
- IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 16,
- IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 17,
- IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 18,
- IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 19,
- IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 20,
- IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 21,
- IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 22,
- IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 23,
- IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 24,
- IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 25,
- IWL_FW_TRIGGER_ID_HOST_D3_START = 26,
- IWL_FW_TRIGGER_ID_HOST_D3_END = 27,
- IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 28,
- IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 29,
- IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 30,
- IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 31,
- IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 32,
- IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 33,
- IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 34,
- IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 35,
- IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 36,
-
- IWL_FW_TRIGGER_ID_NUM,
-}; /* FW_DEBUG_TLV_TRIGGER_ID_E_VER_1 */
+struct iwl_fw_ini_hcmd_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 time_point;
+ __le32 period_msec;
+ struct iwl_fw_ini_hcmd hcmd;
+} __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */
/**
* enum iwl_fw_ini_allocation_id
@@ -404,9 +281,6 @@ enum iwl_fw_ini_trigger_id {
* @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration
- * @IWL_FW_INI_ALLOCATION_ID_SDFX: for SDFX module
- * @IWL_FW_INI_ALLOCATION_ID_FW_DUMP: used for crash and runtime dumps
- * @IWL_FW_INI_ALLOCATION_ID_USER_DEFINED: for future user scenarios
* @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids
*/
enum iwl_fw_ini_allocation_id {
@@ -414,9 +288,6 @@ enum iwl_fw_ini_allocation_id {
IWL_FW_INI_ALLOCATION_ID_DBGC1,
IWL_FW_INI_ALLOCATION_ID_DBGC2,
IWL_FW_INI_ALLOCATION_ID_DBGC3,
- IWL_FW_INI_ALLOCATION_ID_SDFX,
- IWL_FW_INI_ALLOCATION_ID_FW_DUMP,
- IWL_FW_INI_ALLOCATION_ID_USER_DEFINED,
IWL_FW_INI_ALLOCATION_NUM,
}; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */
@@ -436,58 +307,47 @@ enum iwl_fw_ini_buffer_location {
}; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */
/**
- * enum iwl_fw_ini_debug_flow
- *
- * @IWL_FW_INI_DEBUG_INVALID: invalid
- * @IWL_FW_INI_DEBUG_DBTR_FLOW: undefined
- * @IWL_FW_INI_DEBUG_TB2DTF_FLOW: undefined
- */
-enum iwl_fw_ini_debug_flow {
- IWL_FW_INI_DEBUG_INVALID,
- IWL_FW_INI_DEBUG_DBTR_FLOW,
- IWL_FW_INI_DEBUG_TB2DTF_FLOW,
-}; /* FW_DEBUG_TLV_FLOW_E_VER_1 */
-
-/**
* enum iwl_fw_ini_region_type
*
* @IWL_FW_INI_REGION_INVALID: invalid
+ * @IWL_FW_INI_REGION_TLV: uCode and debug TLVs
+ * @IWL_FW_INI_REGION_INTERNAL_BUFFER: monitor SMEM buffer
+ * @IWL_FW_INI_REGION_DRAM_BUFFER: monitor DRAM buffer
+ * @IWL_FW_INI_REGION_TXF: TX fifos
+ * @IWL_FW_INI_REGION_RXF: RX fifo
+ * @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table
+ * @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table
+ * @IWL_FW_INI_REGION_RSP_OR_NOTIF: FW response or notification data
* @IWL_FW_INI_REGION_DEVICE_MEMORY: device internal memory
* @IWL_FW_INI_REGION_PERIPHERY_MAC: periphery registers of MAC
* @IWL_FW_INI_REGION_PERIPHERY_PHY: periphery registers of PHY
* @IWL_FW_INI_REGION_PERIPHERY_AUX: periphery registers of AUX
- * @IWL_FW_INI_REGION_DRAM_BUFFER: DRAM buffer
- * @IWL_FW_INI_REGION_DRAM_IMR: IMR memory
- * @IWL_FW_INI_REGION_INTERNAL_BUFFER: undefined
- * @IWL_FW_INI_REGION_TXF: TX fifos
- * @IWL_FW_INI_REGION_RXF: RX fifo
* @IWL_FW_INI_REGION_PAGING: paging memory
* @IWL_FW_INI_REGION_CSR: CSR registers
- * @IWL_FW_INI_REGION_NOTIFICATION: FW notification data
- * @IWL_FW_INI_REGION_DHC: dhc response to dump
- * @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table
- * @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table
+ * @IWL_FW_INI_REGION_DRAM_IMR: IMR memory
+ * @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config
* @IWL_FW_INI_REGION_NUM: number of region types
*/
enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_INVALID,
+ IWL_FW_INI_REGION_TLV,
+ IWL_FW_INI_REGION_INTERNAL_BUFFER,
+ IWL_FW_INI_REGION_DRAM_BUFFER,
+ IWL_FW_INI_REGION_TXF,
+ IWL_FW_INI_REGION_RXF,
+ IWL_FW_INI_REGION_LMAC_ERROR_TABLE,
+ IWL_FW_INI_REGION_UMAC_ERROR_TABLE,
+ IWL_FW_INI_REGION_RSP_OR_NOTIF,
IWL_FW_INI_REGION_DEVICE_MEMORY,
IWL_FW_INI_REGION_PERIPHERY_MAC,
IWL_FW_INI_REGION_PERIPHERY_PHY,
IWL_FW_INI_REGION_PERIPHERY_AUX,
- IWL_FW_INI_REGION_DRAM_BUFFER,
- IWL_FW_INI_REGION_DRAM_IMR,
- IWL_FW_INI_REGION_INTERNAL_BUFFER,
- IWL_FW_INI_REGION_TXF,
- IWL_FW_INI_REGION_RXF,
IWL_FW_INI_REGION_PAGING,
IWL_FW_INI_REGION_CSR,
- IWL_FW_INI_REGION_NOTIFICATION,
- IWL_FW_INI_REGION_DHC,
- IWL_FW_INI_REGION_LMAC_ERROR_TABLE,
- IWL_FW_INI_REGION_UMAC_ERROR_TABLE,
+ IWL_FW_INI_REGION_DRAM_IMR,
+ IWL_FW_INI_REGION_PCI_IOSF_CONFIG,
IWL_FW_INI_REGION_NUM
-}; /* FW_DEBUG_TLV_REGION_TYPE_E_VER_1 */
+}; /* FW_TLV_DEBUG_REGION_TYPE_API_E */
/**
* enum iwl_fw_ini_time_point
@@ -557,4 +417,22 @@ enum iwl_fw_ini_time_point {
IWL_FW_INI_TIME_POINT_NUM,
}; /* FW_TLV_DEBUG_TIME_POINT_API_E */
+/**
+ * enum iwl_fw_ini_trigger_apply_policy - Determines how to apply triggers
+ *
+ * @IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT: match by time point
+ * @IWL_FW_INI_APPLY_POLICY_MATCH_DATA: match by trigger data
+ * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS: override regions mask.
+ * Append otherwise
+ * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG: override trigger configuration
+ * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
+ * Append otherwise
+ */
+enum iwl_fw_ini_trigger_apply_policy {
+ IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
+ IWL_FW_INI_APPLY_POLICY_MATCH_DATA = BIT(1),
+ IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS = BIT(8),
+ IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
+ IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
+};
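A sketch of how a consumer might honor the override-vs-append semantics of these bits; the helper is illustrative, not part of this patch (kernel u32/u64 types assumed):

/* Illustrative only: override the regions mask if the policy bit is
 * set, append (OR) otherwise, per the kernel-doc above.
 */
static inline u64 merge_regions_mask(u32 apply_policy, u64 cur, u64 new)
{
	if (apply_policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS)
		return new;
	return cur | new;
}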
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index 6b4d59daacd6..e7a1acedbcf1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -78,6 +78,20 @@ enum iwl_mac_conf_subcmd_ids {
*/
CHANNEL_SWITCH_TIME_EVENT_CMD = 0x4,
/**
+ * @MISSED_VAP_NOTIF: &struct iwl_missed_vap_notif
+ */
+ MISSED_VAP_NOTIF = 0xFA,
+ /**
+ * @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd
+ */
+ SESSION_PROTECTION_CMD = 0x5,
+
+ /**
+ * @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
+ */
+ SESSION_PROTECTION_NOTIF = 0xFB,
+
+ /**
* @PROBE_RESPONSE_DATA_NOTIF: &struct iwl_probe_resp_data_notif
*/
PROBE_RESPONSE_DATA_NOTIF = 0xFC,
@@ -131,6 +145,21 @@ struct iwl_probe_resp_data_notif {
} __packed; /* PROBE_RESPONSE_DATA_NTFY_API_S_VER_1 */
/**
+ * struct iwl_missed_vap_notif - notification of missing vap detection
+ *
+ * @mac_id: the mac for which the ucode sends the notification
+ * @num_beacon_intervals_elapsed: beacon intervals elapsed with no vap profile inside
+ * @profile_periodicity: beacon period in which our profile should appear
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_missed_vap_notif {
+ __le32 mac_id;
+ u8 num_beacon_intervals_elapsed;
+ u8 profile_periodicity;
+ u8 reserved[2];
+} __packed; /* MISSED_VAP_NTFY_API_S_VER_1 */
+
+/**
* struct iwl_channel_switch_noa_notif - Channel switch NOA notification
*
* @id_and_color: ID and color of the MAC
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index a93449db7bb2..88bc7733065f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -260,6 +260,11 @@ enum iwl_rx_mpdu_amsdu_info {
IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80,
};
+#define RX_MPDU_BAND_POS 6
+#define RX_MPDU_BAND_MASK 0xC0
+#define BAND_IN_RX_STATUS(_val) \
+ (((_val) & RX_MPDU_BAND_MASK) >> RX_MPDU_BAND_POS)
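A short usage sketch for the new band macro; the status byte value is made up for illustration (kernel u8 type assumed):

/* Illustrative only: bits 7..6 of the status byte carry the band. */
u8 status = 0x80;
u8 band = BAND_IN_RX_STATUS(status); /* (0x80 & 0xC0) >> 6 == 2 */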
+
enum iwl_rx_l3_proto_values {
IWL_RX_L3_TYPE_NONE,
IWL_RX_L3_TYPE_IPV4,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index c0750ced5ac2..71883ca6a596 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -70,6 +70,9 @@
/* Max number of IEs for direct SSID scans in a command */
#define PROBE_OPTION_MAX 20
+#define SCAN_SHORT_SSID_MAX_SIZE 8
+#define SCAN_BSSID_MAX_SIZE 16
+
/**
* struct iwl_ssid_ie - directed scan network information element
*
@@ -278,6 +281,9 @@ enum iwl_scan_channel_flags {
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1),
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2),
IWL_SCAN_CHANNEL_FLAG_EBS_FRAG = BIT(3),
+ IWL_SCAN_CHANNEL_FLAG_FORCE_EBS = BIT(4),
+ IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER = BIT(5),
+ IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER = BIT(6),
};
/* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
@@ -638,6 +644,43 @@ enum iwl_umac_scan_general_flags2 {
};
/**
+ * enum iwl_umac_scan_general_flags_v2 - UMAC scan general flags version 2
+ *
+ * The FW flags were reordered, hence the driver introduces version 2
+ *
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC: periodic or scheduled
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL: pass all probe responses and beacons
+ * during scan iterations
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE: send complete notification
+ * on every iteration instead of only once after the last iteration
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1: fragmented scan LMAC1
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2: fragmented scan LMAC2
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH: this scan checks for profile matching
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS: use all valid chains for RX
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL: works with adaptive dwell
+ * for active channel
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE: can be preempted by other requests
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_NTF_START: send notification of scan start
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID: matching on multiple SSIDs
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE: all the channels are scanned
+ * as passive
+ */
+enum iwl_umac_scan_general_flags_v2 {
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC = BIT(0),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL = BIT(1),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE = BIT(2),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1 = BIT(3),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2 = BIT(4),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH = BIT(5),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS = BIT(6),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL = BIT(7),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE = BIT(8),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_NTF_START = BIT(9),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID = BIT(10),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE = BIT(11),
+};
+
+/**
* struct iwl_scan_channel_cfg_umac
* @flags: bitmap - 0-19: directed scan to i'th ssid.
* @channel_num: channel number 1-13 etc.
@@ -831,6 +874,171 @@ struct iwl_scan_req_umac {
#define IWL_SCAN_REQ_UMAC_SIZE_V1 36
/**
+ * struct iwl_scan_probe_params_v3
+ * @preq: scan probe request params
+ * @ssid_num: number of valid SSIDs in direct scan array
+ * @short_ssid_num: number of valid short SSIDs in short ssid array
+ * @bssid_num: number of valid bssid in bssids array
+ * @reserved: reserved
+ * @direct_scan: list of ssids
+ * @short_ssid: array of short ssids
+ * @bssid_array: array of bssids
+ */
+struct iwl_scan_probe_params_v3 {
+ struct iwl_scan_probe_req preq;
+ u8 ssid_num;
+ u8 short_ssid_num;
+ u8 bssid_num;
+ u8 reserved;
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE];
+ u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE];
+} __packed;
+
+#define SCAN_MAX_NUM_CHANS_V3 67
+
+/**
+ * struct iwl_scan_channel_params_v3
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @reserved: for future use and alignment
+ * @channel_config: array of explicit channel configurations
+ * for the 2.4GHz and 5.2GHz bands
+ */
+struct iwl_scan_channel_params_v3 {
+ u8 flags;
+ u8 count;
+ __le16 reserved;
+ struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_3 */
+
+/**
+ * struct iwl_scan_channel_params_v4
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @num_of_aps_override: override the number of APs the FW uses to calculate
+ * dwell time when adaptive dwell is used
+ * @reserved: for future use and alignment
+ * @channel_config: array of explicit channel configurations
+ * for the 2.4GHz and 5.2GHz bands
+ * @adwell_ch_override_bitmap: when using adaptive dwell, override the number
+ * of APs value with &num_of_aps_override for the channel.
+ * To map a channel to its index, use &iwl_mvm_scan_ch_and_band_to_idx
+ */
+struct iwl_scan_channel_params_v4 {
+ u8 flags;
+ u8 count;
+ u8 num_of_aps_override;
+ u8 reserved;
+ struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
+ u8 adwell_ch_override_bitmap[16];
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+
+/**
+ * struct iwl_scan_general_params_v10
+ * @flags: &enum iwl_umac_scan_flags
+ * @reserved: reserved for future
+ * @scan_start_mac_id: report the scan start TSF time according to this mac's TSF
+ * @active_dwell: dwell time for active scan per LMAC
+ * @adwell_default_2g: adaptive dwell default number of APs
+ * for 2.4GHz channel
+ * @adwell_default_5g: adaptive dwell default number of APs
+ * for 5GHz channels
+ * @adwell_default_social_chn: adaptive dwell default number of
+ * APs per social channel
+ * @reserved1: reserved for future
+ * @adwell_max_budget: the maximal number of TUs that adaptive dwell
+ * can add to the total scan time
+ * @max_out_of_time: max out of serving channel time, per LMAC
+ * @suspend_time: max suspend time, per LMAC
+ * @scan_priority: priority of the request
+ * @passive_dwell: continuous dwell time for passive channels
+ * (without adaptive dwell)
+ * @num_of_fragments: number of fragments needed for full fragmented
+ * scan coverage.
+ */
+struct iwl_scan_general_params_v10 {
+ __le16 flags;
+ u8 reserved;
+ u8 scan_start_mac_id;
+ u8 active_dwell[SCAN_TWO_LMACS];
+ u8 adwell_default_2g;
+ u8 adwell_default_5g;
+ u8 adwell_default_social_chn;
+ u8 reserved1;
+ __le16 adwell_max_budget;
+ __le32 max_out_of_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ u8 passive_dwell[SCAN_TWO_LMACS];
+ u8 num_of_fragments[SCAN_TWO_LMACS];
+} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_10 */
+
+/**
+ * struct iwl_scan_periodic_parms_v1
+ * @schedule: scan scheduling parameters
+ * @delay: initial delay of the periodic scan in seconds
+ * @reserved: reserved for future
+ */
+struct iwl_scan_periodic_parms_v1 {
+ struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS];
+ __le16 delay;
+ __le16 reserved;
+} __packed; /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+
+/**
+ * struct iwl_scan_req_params_v11
+ * @general_params: &struct iwl_scan_general_params_v10
+ * @channel_params: &struct iwl_scan_channel_params_v3
+ * @periodic_params: &struct iwl_scan_periodic_parms_v1
+ * @probe_params: &struct iwl_scan_probe_params_v3
+ */
+struct iwl_scan_req_params_v11 {
+ struct iwl_scan_general_params_v10 general_params;
+ struct iwl_scan_channel_params_v3 channel_params;
+ struct iwl_scan_periodic_parms_v1 periodic_params;
+ struct iwl_scan_probe_params_v3 probe_params;
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_11 */
+
+/**
+ * struct iwl_scan_req_params_v12
+ * @general_params: &struct iwl_scan_general_params_v10
+ * @channel_params: &struct iwl_scan_channel_params_v4
+ * @periodic_params: &struct iwl_scan_periodic_parms_v1
+ * @probe_params: &struct iwl_scan_probe_params_v3
+ */
+struct iwl_scan_req_params_v12 {
+ struct iwl_scan_general_params_v10 general_params;
+ struct iwl_scan_channel_params_v4 channel_params;
+ struct iwl_scan_periodic_parms_v1 periodic_params;
+ struct iwl_scan_probe_params_v3 probe_params;
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */
+
+/**
+ * struct iwl_scan_req_umac_v11
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @scan_params: scan parameters
+ */
+struct iwl_scan_req_umac_v11 {
+ __le32 uid;
+ __le32 ooc_priority;
+ struct iwl_scan_req_params_v11 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_11 */
+
+/**
+ * struct iwl_scan_req_umac_v12
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @scan_params: scan parameters
+ */
+struct iwl_scan_req_umac_v12 {
+ __le32 uid;
+ __le32 ooc_priority;
+ struct iwl_scan_req_params_v12 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */
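Both request versions share the uid/ooc_priority header and differ only in the params struct; a hedged sketch of picking the payload size per API version (the helper and version check are assumptions, not this patch's logic):

/* Illustrative only: command payload size per UMAC scan API version. */
static inline size_t scan_req_umac_size(int api_ver)
{
	return api_ver >= 12 ? sizeof(struct iwl_scan_req_umac_v12) :
			       sizeof(struct iwl_scan_req_umac_v11);
}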
+
+/**
* struct iwl_umac_scan_abort
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @flags: reserved
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index 450227f81706..970e9e508ad0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -288,8 +288,7 @@ struct iwl_mvm_keyinfo {
* @addr: station's MAC address
* @reserved2: reserved
* @sta_id: index of station in uCode's station table
- * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
- * alone. 1 - modify, 0 - don't change.
+ * @modify_mask: from &enum iwl_sta_modify_flag, selects what to change
* @reserved3: reserved
* @station_flags: look at &enum iwl_sta_flags
* @station_flags_msk: what of %station_flags have changed,
@@ -369,8 +368,7 @@ enum iwl_sta_type {
* @addr: station's MAC address
* @reserved2: reserved
* @sta_id: index of station in uCode's station table
- * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
- * alone. 1 - modify, 0 - don't change.
+ * @modify_mask: from &enum iwl_sta_modify_flag, selects what to change
* @reserved3: reserved
* @station_flags: look at &enum iwl_sta_flags
* @station_flags_msk: what of %station_flags have changed,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index 4621ef93a2cf..a731f28e101a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -393,4 +393,82 @@ struct iwl_hs20_roc_res {
__le32 status;
} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
+/**
+ * enum iwl_mvm_session_prot_conf_id - session protection configurations
+ * @SESSION_PROTECT_CONF_ASSOC: Start a session protection for association.
+ * The firmware will allocate two events.
+ * Valid for BSS_STA and P2P_STA.
+ * * A rather short event that can't be fragmented and with a very
+ * high priority. If everything goes well (99% of the cases), the
+ * association should complete within this first event. During
+ * that event, no other activity will happen in the firmware,
+ * which is why it can't be too long.
+ * The length of this event is hard-coded in the firmware: 300TUs.
+ * * Another event which can be much longer (its duration is
+ * configurable by the driver) which has a slightly lower
+ * priority and that can be fragmented allowing other activities
+ * to run while this event is running.
+ * The firmware will automatically remove both events once the driver sets
+ * the BSS MAC as associated. Neither of the events will be removed
+ * for the P2P_STA MAC.
+ * Only the duration is configurable for this protection.
+ * @SESSION_PROTECT_CONF_GO_CLIENT_ASSOC: not used
+ * @SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV: Schedule the P2P Device to be in
+ * listen mode. Will be fragmented. Valid only on the P2P Device MAC.
+ * The firmware will take into account the duration, the interval and
+ * the repetition count.
+ * @SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION: Schedule the P2P Device to be
+ * able to run the GO Negotiation. Will not be fragmented and not
+ * repetitive. Valid only on the P2P Device MAC. Only the duration will
+ * be taken into account.
+ */
+enum iwl_mvm_session_prot_conf_id {
+ SESSION_PROTECT_CONF_ASSOC,
+ SESSION_PROTECT_CONF_GO_CLIENT_ASSOC,
+ SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV,
+ SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION,
+}; /* SESSION_PROTECTION_CONF_ID_E_VER_1 */
+
+/**
+ * struct iwl_mvm_session_prot_cmd - configure a session protection
+ * @id_and_color: the id and color of the mac for which this session protection
+ * is sent
+ * @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE
+ * @conf_id: see &enum iwl_mvm_session_prot_conf_id
+ * @duration_tu: the duration of the whole protection in TUs.
+ * @repetition_count: not used
+ * @interval: not used
+ *
+ * Note: the session protection will always be scheduled to start as
+ * early as possible, but the maximum delay is configuration dependent.
+ * The firmware supports only one concurrent session protection per vif.
+ * Adding a new session protection will remove any currently running session.
+ */
+struct iwl_mvm_session_prot_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 hdr */
+ __le32 id_and_color;
+ __le32 action;
+ __le32 conf_id;
+ __le32 duration_tu;
+ __le32 repetition_count;
+ __le32 interval;
+} __packed; /* SESSION_PROTECTION_CMD_API_S_VER_1 */
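A minimal sketch of filling the new command for an association protection; the 500 TU duration is made up, and mac_id_and_color is assumed to come from the vif context (FW_CTXT_ACTION_ADD as named in the kernel-doc above):

/* Illustrative only: request an association session protection.
 * repetition_count and interval are unused for this conf_id.
 */
struct iwl_mvm_session_prot_cmd cmd = {
	.id_and_color = cpu_to_le32(mac_id_and_color),
	.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
	.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
	.duration_tu = cpu_to_le32(500),
};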
+
+/**
+ * struct iwl_mvm_session_prot_notif - session protection started / ended
+ * @mac_id: the mac id for which the session protection started / ended
+ * @status: 1 means success, 0 means failure
+ * @start: 1 means the session protection started, 0 means it ended
+ * @conf_id: the configuration id of the session that started / ended
+ *
+ * Note that any session protection will always get two notifications: start
+ * and end, even if the firmware could not schedule it.
+ */
+struct iwl_mvm_session_prot_notif {
+ __le32 mac_id;
+ __le32 status;
+ __le32 start;
+ __le32 conf_id;
+} __packed; /* SESSION_PROTECTION_NOTIFICATION_API_S_VER_2 */
+
#endif /* __iwl_fw_api_time_event_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 8511e735c374..f89a9e16a8c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -323,7 +323,7 @@ struct iwl_tx_cmd_gen2 {
} __packed; /* TX_CMD_API_S_VER_7 */
/**
- * struct iwl_tx_cmd_gen3 - TX command struct to FW for 22560 devices
+ * struct iwl_tx_cmd_gen3 - TX command struct to FW for AX210+ devices
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @flags: combination of &enum iwl_tx_cmd_flags
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 87421807e040..ed90dd104366 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1055,19 +1055,31 @@ out:
return dump_file;
}
+/**
+ * struct iwl_dump_ini_region_data - region data
+ * @reg_tlv: region TLV
+ * @dump_data: dump data
+ */
+struct iwl_dump_ini_region_data {
+ struct iwl_ucode_tlv *reg_tlv;
+ struct iwl_fwrt_dump_data *dump_data;
+};
+
static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
u32 prph_val;
- u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
int i;
range->internal_base_addr = cpu_to_le32(addr);
- range->range_data_size = reg->internal.range_data_size;
- for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) {
+ range->range_data_size = reg->dev_addr.size;
+ for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
prph_val = iwl_read_prph(fwrt->trans, addr + i);
if (prph_val == 0x5a5a5a5a)
return -EBUSY;
@@ -1078,39 +1090,42 @@ static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt,
}
static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
- u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
int i;
range->internal_base_addr = cpu_to_le32(addr);
- range->range_data_size = reg->internal.range_data_size;
- for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4)
+ range->range_data_size = reg->dev_addr.size;
+ for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4)
*val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, addr + i));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
- u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
range->internal_base_addr = cpu_to_le32(addr);
- range->range_data_size = reg->internal.range_data_size;
+ range->range_data_size = reg->dev_addr.size;
iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
- le32_to_cpu(reg->internal.range_data_size));
+ le32_to_cpu(reg->dev_addr.size));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
void *range_ptr, int idx)
{
/* increase idx by 1 since the pages are from 1 to
@@ -1133,14 +1148,14 @@ static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
}
static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
struct iwl_fw_ini_error_dump_range *range;
u32 page_size;
if (!fwrt->trans->trans_cfg->gen2)
- return _iwl_dump_ini_paging_iter(fwrt, reg, range_ptr, idx);
+ return _iwl_dump_ini_paging_iter(fwrt, range_ptr, idx);
range = range_ptr;
page_size = fwrt->trans->init_dram.paging[idx].size;
@@ -1155,45 +1170,61 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg, void *range_ptr,
- int idx)
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
- u32 start_addr = iwl_read_umac_prph(fwrt->trans,
- MON_BUFF_BASE_ADDR_VER2);
+ struct iwl_dram_data *frag;
+ u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
- if (start_addr == 0x5a5a5a5a)
- return -EBUSY;
+ frag = &fwrt->trans->dbg.fw_mon_ini[alloc_id].frags[idx];
+
+ range->dram_base_addr = cpu_to_le64(frag->physical);
+ range->range_data_size = cpu_to_le32(frag->size);
+
+ memcpy(range->data, frag->block, frag->size);
- range->dram_base_addr = cpu_to_le64(start_addr);
- range->range_data_size = cpu_to_le32(fwrt->trans->dbg.fw_mon[idx].size);
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
- memcpy(range->data, fwrt->trans->dbg.fw_mon[idx].block,
- fwrt->trans->dbg.fw_mon[idx].size);
+static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 addr = le32_to_cpu(reg->internal_buffer.base_addr);
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = reg->internal_buffer.size;
+ iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
+ le32_to_cpu(reg->internal_buffer.size));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg, int idx)
+ struct iwl_dump_ini_region_data *reg_data, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
int txf_num = cfg->num_txfifo_entries;
int int_txf_num = ARRAY_SIZE(cfg->internal_txfifo_size);
- u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid1);
+ u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid[0]);
if (!idx) {
- if (le32_to_cpu(reg->offset) &&
- WARN_ONCE(cfg->num_lmacs == 1,
- "Invalid lmac offset: 0x%x\n",
- le32_to_cpu(reg->offset)))
+ if (le32_to_cpu(reg->fifos.offset) && cfg->num_lmacs == 1) {
+ IWL_ERR(fwrt, "WRT: Invalid lmac offset 0x%x\n",
+ le32_to_cpu(reg->fifos.offset));
return false;
+ }
iter->internal_txf = 0;
iter->fifo_size = 0;
iter->fifo = -1;
- if (le32_to_cpu(reg->offset))
+ if (le32_to_cpu(reg->fifos.offset))
iter->lmac = 1;
else
iter->lmac = 0;
@@ -1224,27 +1255,28 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
}
static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
- u32 offs = le32_to_cpu(reg->offset), addr;
- u32 registers_size =
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump);
+ u32 offs = le32_to_cpu(reg->fifos.offset), addr;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
+ u32 registers_size = registers_num * sizeof(*reg_dump);
__le32 *data;
unsigned long flags;
int i;
- if (!iwl_ini_txf_iter(fwrt, reg, idx))
+ if (!iwl_ini_txf_iter(fwrt, reg_data, idx))
return -EIO;
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return -EBUSY;
range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo);
- range->fifo_hdr.num_of_registers = reg->fifos.num_of_registers;
+ range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num);
range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size);
iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo);
@@ -1253,8 +1285,8 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
* read txf registers. for each register, write to the dump the
* register address and its value
*/
- for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) {
- addr = le32_to_cpu(reg->start_addr[i]) + offs;
+ for (i = 0; i < registers_num; i++) {
+ addr = le32_to_cpu(reg->addrs[i]) + offs;
reg_dump->addr = cpu_to_le32(addr);
reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
@@ -1263,7 +1295,7 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
reg_dump++;
}
- if (reg->fifos.header_only) {
+ if (reg->fifos.hdr_only) {
range->range_data_size = cpu_to_le32(registers_size);
goto out;
}
@@ -1294,11 +1326,12 @@ struct iwl_ini_rxf_data {
};
static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
struct iwl_ini_rxf_data *data)
{
- u32 fid1 = le32_to_cpu(reg->fifos.fid1);
- u32 fid2 = le32_to_cpu(reg->fifos.fid2);
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 fid1 = le32_to_cpu(reg->fifos.fid[0]);
+ u32 fid2 = le32_to_cpu(reg->fifos.fid[1]);
u32 fifo_idx;
if (!data)
@@ -1330,20 +1363,21 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
}
static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
struct iwl_ini_rxf_data rxf_data;
struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
- u32 offs = le32_to_cpu(reg->offset), addr;
- u32 registers_size =
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump);
+ u32 offs = le32_to_cpu(reg->fifos.offset), addr;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
+ u32 registers_size = registers_num * sizeof(*reg_dump);
__le32 *data;
unsigned long flags;
int i;
- iwl_ini_get_rxf_data(fwrt, reg, &rxf_data);
+ iwl_ini_get_rxf_data(fwrt, reg_data, &rxf_data);
if (!rxf_data.size)
return -EIO;
@@ -1351,15 +1385,15 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
return -EBUSY;
range->fifo_hdr.fifo_num = cpu_to_le32(rxf_data.fifo_num);
- range->fifo_hdr.num_of_registers = reg->fifos.num_of_registers;
+ range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num);
range->range_data_size = cpu_to_le32(rxf_data.size + registers_size);
/*
* Read RXF registers. For each register, write its address and
* value to the dump.
*/
- for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) {
- addr = le32_to_cpu(reg->start_addr[i]) + offs;
+ for (i = 0; i < registers_num; i++) {
+ addr = le32_to_cpu(reg->addrs[i]) + offs;
reg_dump->addr = cpu_to_le32(addr);
reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
@@ -1368,7 +1402,7 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
reg_dump++;
}
- if (reg->fifos.header_only) {
+ if (reg->fifos.hdr_only) {
range->range_data_size = cpu_to_le32(registers_size);
goto out;
}
@@ -1399,9 +1433,50 @@ out:
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
-static void *iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- void *data)
+static int
+iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_region_err_table *err_table = &reg->err_table;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 addr = le32_to_cpu(err_table->base_addr) +
+ le32_to_cpu(err_table->offset);
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = err_table->size;
+ iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
+ le32_to_cpu(err_table->size));
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt;
+ u32 pkt_len;
+
+ if (!pkt)
+ return -EIO;
+
+ pkt_len = iwl_rx_packet_payload_len(pkt);
+
+ memcpy(&range->fw_pkt_hdr, &pkt->hdr, sizeof(range->fw_pkt_hdr));
+ range->range_data_size = cpu_to_le32(pkt_len);
+
+ memcpy(range->data, pkt->data, pkt_len);
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static void *
+iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data)
{
struct iwl_fw_ini_error_dump *dump = data;
@@ -1410,14 +1485,45 @@ static void *iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
return dump->ranges;
}
-static void
-*iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- struct iwl_fw_ini_monitor_dump *data,
- u32 write_ptr_addr, u32 write_ptr_msk,
- u32 cycle_cnt_addr, u32 cycle_cnt_msk)
+/**
+ * mask_apply_and_normalize - apply a mask to a value and normalize the result
+ *
+ * The normalization shifts the masked field down to bit 0, based on the
+ * first set bit in the mask.
+ *
+ * @val: value to mask
+ * @mask: mask to apply and to normalize with
+ */
+static u32 mask_apply_and_normalize(u32 val, u32 mask)
+{
+ return (val & mask) >> (ffs(mask) - 1);
+}
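+
+/*
+ * Illustrative example, not part of the driver: with mask 0x0000ff00,
+ * ffs(mask) == 9, so mask_apply_and_normalize(0x1234, 0xff00) returns
+ * (0x1234 & 0xff00) >> 8 == 0x12, i.e. the masked field shifted down to
+ * bit 0 regardless of its position in the register.
+ */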
+
+static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id,
+ const struct iwl_fw_mon_reg *reg_info)
+{
+ u32 val, offs;
+
+ /* The header addresses of DBGCi are calculated as follows:
+ * DBGC1 address + (0x100 * i)
+ */
+ offs = (alloc_id - IWL_FW_INI_ALLOCATION_ID_DBGC1) * 0x100;
+
+ if (!reg_info || !reg_info->addr || !reg_info->mask)
+ return 0;
+
+ val = iwl_read_prph_no_grab(fwrt->trans, reg_info->addr + offs);
+
+ return cpu_to_le32(mask_apply_and_normalize(val, reg_info->mask));
+}
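+
+/*
+ * Example (illustrative): for IWL_FW_INI_ALLOCATION_ID_DBGC1 the offset
+ * is 0 and the register is read at reg_info->addr itself; the next
+ * allocation id reads at reg_info->addr + 0x100, matching the
+ * 0x100-spaced DBGCi header layout described above.
+ */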
+
+static void *
+iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ struct iwl_fw_ini_monitor_dump *data,
+ const struct iwl_fw_mon_regs *addrs)
{
- u32 write_ptr, cycle_cnt;
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
unsigned long flags;
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) {
@@ -1425,76 +1531,66 @@ static void
return NULL;
}
- write_ptr = iwl_read_prph_no_grab(fwrt->trans, write_ptr_addr);
- cycle_cnt = iwl_read_prph_no_grab(fwrt->trans, cycle_cnt_addr);
+ data->write_ptr = iwl_get_mon_reg(fwrt, alloc_id,
+ &addrs->write_ptr);
+ data->cycle_cnt = iwl_get_mon_reg(fwrt, alloc_id,
+ &addrs->cycle_cnt);
+ data->cur_frag = iwl_get_mon_reg(fwrt, alloc_id,
+ &addrs->cur_frag);
iwl_trans_release_nic_access(fwrt->trans, &flags);
data->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
- data->write_ptr = cpu_to_le32(write_ptr & write_ptr_msk);
- data->cycle_cnt = cpu_to_le32(cycle_cnt & cycle_cnt_msk);
return data->ranges;
}
-static void
-*iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- void *data)
+static void *
+iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
- u32 write_ptr_addr, write_ptr_msk, cycle_cnt_addr, cycle_cnt_msk;
-
- switch (fwrt->trans->trans_cfg->device_family) {
- case IWL_DEVICE_FAMILY_9000:
- case IWL_DEVICE_FAMILY_22000:
- write_ptr_addr = MON_BUFF_WRPTR_VER2;
- write_ptr_msk = -1;
- cycle_cnt_addr = MON_BUFF_CYCLE_CNT_VER2;
- cycle_cnt_msk = -1;
- break;
- default:
- IWL_ERR(fwrt, "Unsupported device family %d\n",
- fwrt->trans->trans_cfg->device_family);
- return NULL;
- }
- return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump, write_ptr_addr,
- write_ptr_msk, cycle_cnt_addr,
- cycle_cnt_msk);
+ return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+ &fwrt->trans->cfg->mon_dram_regs);
}
-static void
-*iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- void *data)
+static void *
+iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
- const struct iwl_cfg *cfg = fwrt->trans->cfg;
- if (fwrt->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_9000 &&
- fwrt->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22000) {
- IWL_ERR(fwrt, "Unsupported device family %d\n",
- fwrt->trans->trans_cfg->device_family);
- return NULL;
- }
+ return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+ &fwrt->trans->cfg->mon_smem_regs);
+}
- return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump,
- cfg->fw_mon_smem_write_ptr_addr,
- cfg->fw_mon_smem_write_ptr_msk,
- cfg->fw_mon_smem_cycle_cnt_ptr_addr,
- cfg->fw_mon_smem_cycle_cnt_ptr_msk);
+static void *
+iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_err_table_dump *dump = data;
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
+ dump->version = reg->err_table.version;
+
+ return dump->ranges;
}
static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
- return le32_to_cpu(reg->internal.num_of_ranges);
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+
+ return iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
}
static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
if (fwrt->trans->trans_cfg->gen2)
return fwrt->trans->init_dram.paging_cnt;
@@ -1502,54 +1598,73 @@ static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt,
return fwrt->num_of_paging_blk;
}
-static u32 iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32
+iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
- return 1;
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_mon *fw_mon;
+ u32 ranges = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id);
+ int i;
+
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ for (i = 0; i < fw_mon->num_frags; i++) {
+ if (!fw_mon->frags[i].size)
+ break;
+
+ ranges++;
+ }
+
+ return ranges;
}
static u32 iwl_dump_ini_txf_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
u32 num_of_fifos = 0;
- while (iwl_ini_txf_iter(fwrt, reg, num_of_fifos))
+ while (iwl_ini_txf_iter(fwrt, reg_data, num_of_fifos))
num_of_fifos++;
return num_of_fifos;
}
-static u32 iwl_dump_ini_rxf_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32 iwl_dump_ini_single_range(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
- /* Each Rx fifo needs a different offset and therefore, it's
- * region can contain only one fifo, i.e. 1 memory range.
- */
return 1;
}
static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
- return sizeof(struct iwl_fw_ini_error_dump) +
- iwl_dump_ini_mem_ranges(fwrt, reg) *
- (sizeof(struct iwl_fw_ini_error_dump_range) +
- le32_to_cpu(reg->internal.range_data_size));
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->dev_addr.size);
+ u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data);
+
+ if (!size || !ranges)
+ return 0;
+
+ return sizeof(struct iwl_fw_ini_error_dump) + ranges *
+ (size + sizeof(struct iwl_fw_ini_error_dump_range));
}
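+/*
+ * Worked example (illustrative): a region TLV with four entries in its
+ * addrs array and dev_addr.size == 0x100 yields
+ * sizeof(struct iwl_fw_ini_error_dump) + 4 * (0x100 +
+ * sizeof(struct iwl_fw_ini_error_dump_range)) bytes, i.e. one dump
+ * header followed by a per-range header and payload for each address.
+ */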
-static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32
+iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
int i;
u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range);
u32 size = sizeof(struct iwl_fw_ini_error_dump);
if (fwrt->trans->trans_cfg->gen2) {
- for (i = 0; i < iwl_dump_ini_paging_ranges(fwrt, reg); i++)
+ for (i = 0; i < iwl_dump_ini_paging_ranges(fwrt, reg_data); i++)
size += range_header_len +
fwrt->trans->init_dram.paging[i].size;
} else {
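+ /* fw_paging_db[0] holds the CSS block (an assumption based on
+ * the paging layout), hence the 1-based iteration below
+ */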
- for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg); i++)
+ for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data);
+ i++)
size += range_header_len +
fwrt->fw_paging_db[i].fw_paging_size;
}
@@ -1557,66 +1672,128 @@ static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
-static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32
+iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
- u32 size = sizeof(struct iwl_fw_ini_monitor_dump) +
- sizeof(struct iwl_fw_ini_error_dump_range);
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_mon *fw_mon;
+ u32 size = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id);
+ int i;
- if (fwrt->trans->dbg.num_blocks)
- size += fwrt->trans->dbg.fw_mon[0].size;
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ for (i = 0; i < fw_mon->num_frags; i++) {
+ struct iwl_dram_data *frag = &fw_mon->frags[i];
+
+ if (!frag->size)
+ break;
+
+ size += sizeof(struct iwl_fw_ini_error_dump_range) + frag->size;
+ }
+
+ if (size)
+ size += sizeof(struct iwl_fw_ini_monitor_dump);
return size;
}
-static u32 iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32
+iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
- return sizeof(struct iwl_fw_ini_monitor_dump) +
- iwl_dump_ini_mem_ranges(fwrt, reg) *
- (sizeof(struct iwl_fw_ini_error_dump_range) +
- le32_to_cpu(reg->internal.range_data_size));
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
+ u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id), size;
+
+ fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
+ if (le32_to_cpu(fw_mon_cfg->buf_location) !=
+ IWL_FW_INI_LOCATION_SRAM_PATH)
+ return 0;
+
+ size = le32_to_cpu(reg->internal_buffer.size);
+ if (!size)
+ return 0;
+
+ size += sizeof(struct iwl_fw_ini_monitor_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
}
static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
u32 size = 0;
u32 fifo_hdr = sizeof(struct iwl_fw_ini_error_dump_range) +
- le32_to_cpu(reg->fifos.num_of_registers) *
- sizeof(struct iwl_fw_ini_error_dump_register);
+ registers_num *
+ sizeof(struct iwl_fw_ini_error_dump_register);
- while (iwl_ini_txf_iter(fwrt, reg, size)) {
+ while (iwl_ini_txf_iter(fwrt, reg_data, size)) {
size += fifo_hdr;
- if (!reg->fifos.header_only)
+ if (!reg->fifos.hdr_only)
size += iter->fifo_size;
}
- if (size)
- size += sizeof(struct iwl_fw_ini_error_dump);
+ if (!size)
+ return 0;
- return size;
+ return size + sizeof(struct iwl_fw_ini_error_dump);
}
static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_ini_rxf_data rx_data;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
u32 size = sizeof(struct iwl_fw_ini_error_dump) +
sizeof(struct iwl_fw_ini_error_dump_range) +
- le32_to_cpu(reg->fifos.num_of_registers) *
- sizeof(struct iwl_fw_ini_error_dump_register);
+ registers_num * sizeof(struct iwl_fw_ini_error_dump_register);
- if (reg->fifos.header_only)
+ if (reg->fifos.hdr_only)
return size;
- iwl_ini_get_rxf_data(fwrt, reg, &rx_data);
+ iwl_ini_get_rxf_data(fwrt, reg_data, &rx_data);
size += rx_data.size;
return size;
}
+static u32
+iwl_dump_ini_err_table_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->err_table.size);
+
+ if (size)
+ size += sizeof(struct iwl_fw_ini_err_table_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
+}
+
+static u32
+iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ u32 size = 0;
+
+ if (!reg_data->dump_data->fw_pkt)
+ return 0;
+
+ size += iwl_rx_packet_payload_len(reg_data->dump_data->fw_pkt);
+ if (size)
+ size += sizeof(struct iwl_fw_ini_error_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
+}
+
/**
* struct iwl_dump_ini_mem_ops - ini memory dump operations
* @get_num_of_ranges: returns the number of memory ranges in the region.
@@ -1628,14 +1805,15 @@ static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt,
*/
struct iwl_dump_ini_mem_ops {
u32 (*get_num_of_ranges)(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg);
+ struct iwl_dump_ini_region_data *reg_data);
u32 (*get_size)(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg);
+ struct iwl_dump_ini_region_data *reg_data);
void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg, void *data);
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data);
int (*fill_range)(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg, void *range,
- int idx);
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range, int idx);
};
/**
@@ -1650,60 +1828,61 @@ struct iwl_dump_ini_mem_ops {
* @ops: memory dump operations
*/
static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
const struct iwl_dump_ini_mem_ops *ops)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_dump_entry *entry;
struct iwl_fw_error_dump_data *tlv;
struct iwl_fw_ini_error_dump_header *header;
- u32 num_of_ranges, i, type = le32_to_cpu(reg->region_type), size;
+ u32 type = le32_to_cpu(reg->type), id = le32_to_cpu(reg->id);
+ u32 num_of_ranges, i, size;
void *range;
if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
!ops->fill_range)
return 0;
- size = ops->get_size(fwrt, reg);
+ size = ops->get_size(fwrt, reg_data);
if (!size)
return 0;
- entry = kmalloc(sizeof(*entry) + sizeof(*tlv) + size, GFP_KERNEL);
+ entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size);
if (!entry)
return 0;
entry->size = sizeof(*tlv) + size;
tlv = (void *)entry->data;
- tlv->type = cpu_to_le32(type);
+ tlv->type = reg->type;
tlv->len = cpu_to_le32(size);
- IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n",
- le32_to_cpu(reg->region_id), type);
+ IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n", id,
+ type);
- num_of_ranges = ops->get_num_of_ranges(fwrt, reg);
+ num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
header = (void *)tlv->data;
- header->region_id = reg->region_id;
+ header->region_id = reg->id;
header->num_of_ranges = cpu_to_le32(num_of_ranges);
- header->name_len = cpu_to_le32(min_t(int, IWL_FW_INI_MAX_NAME,
- le32_to_cpu(reg->name_len)));
- memcpy(header->name, reg->name, le32_to_cpu(header->name_len));
+ header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
+ memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
- range = ops->fill_mem_hdr(fwrt, reg, header);
+ range = ops->fill_mem_hdr(fwrt, reg_data, header);
if (!range) {
IWL_ERR(fwrt,
"WRT: Failed to fill region header: id=%d, type=%d\n",
- le32_to_cpu(reg->region_id), type);
+ id, type);
goto out_err;
}
for (i = 0; i < num_of_ranges; i++) {
- int range_size = ops->fill_range(fwrt, reg, range, i);
+ int range_size = ops->fill_range(fwrt, reg_data, range, i);
if (range_size < 0) {
IWL_ERR(fwrt,
"WRT: Failed to dump region: id=%d, type=%d\n",
- le32_to_cpu(reg->region_id), type);
+ id, type);
goto out_err;
}
range = range + range_size;
@@ -1714,22 +1893,29 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
return entry->size;
out_err:
- kfree(entry);
+ vfree(entry);
return 0;
}
static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_trigger *trigger,
+ struct iwl_fw_ini_trigger_tlv *trigger,
struct list_head *list)
{
struct iwl_fw_ini_dump_entry *entry;
struct iwl_fw_error_dump_data *tlv;
struct iwl_fw_ini_dump_info *dump;
- u32 reg_ids_size = le32_to_cpu(trigger->num_regions) * sizeof(__le32);
- u32 size = sizeof(*tlv) + sizeof(*dump) + reg_ids_size;
+ struct iwl_dbg_tlv_node *node;
+ struct iwl_fw_ini_dump_cfg_name *cfg_name;
+ u32 size = sizeof(*tlv) + sizeof(*dump);
+ u32 num_of_cfg_names = 0;
+
+ list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
+ size += sizeof(*cfg_name);
+ num_of_cfg_names++;
+ }
- entry = kmalloc(sizeof(*entry) + size, GFP_KERNEL);
+ entry = vzalloc(sizeof(*entry) + size);
if (!entry)
return 0;
@@ -1737,13 +1923,14 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
tlv = (void *)entry->data;
tlv->type = cpu_to_le32(IWL_INI_DUMP_INFO_TYPE);
- tlv->len = cpu_to_le32(sizeof(*dump) + reg_ids_size);
+ tlv->len = cpu_to_le32(size - sizeof(*tlv));
dump = (void *)tlv->data;
dump->version = cpu_to_le32(IWL_INI_DUMP_VER);
- dump->trigger_id = trigger->trigger_id;
- dump->is_external_cfg =
+ dump->time_point = trigger->time_point;
+ dump->trigger_reason = trigger->trigger_reason;
+ dump->external_cfg_state =
cpu_to_le32(fwrt->trans->dbg.external_ini_cfg);
dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type);
@@ -1763,26 +1950,26 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
dump->umac_major = cpu_to_le32(fwrt->dump.fw_ver.umac_major);
dump->umac_minor = cpu_to_le32(fwrt->dump.fw_ver.umac_minor);
+ dump->fw_mon_mode = cpu_to_le32(fwrt->trans->dbg.ini_dest);
+ dump->regions_mask = trigger->regions_mask;
+
dump->build_tag_len = cpu_to_le32(sizeof(dump->build_tag));
memcpy(dump->build_tag, fwrt->fw->human_readable,
sizeof(dump->build_tag));
- dump->img_name_len = cpu_to_le32(sizeof(dump->img_name));
- memcpy(dump->img_name, fwrt->dump.img_name, sizeof(dump->img_name));
-
- dump->internal_dbg_cfg_name_len =
- cpu_to_le32(sizeof(dump->internal_dbg_cfg_name));
- memcpy(dump->internal_dbg_cfg_name, fwrt->dump.internal_dbg_cfg_name,
- sizeof(dump->internal_dbg_cfg_name));
-
- dump->external_dbg_cfg_name_len =
- cpu_to_le32(sizeof(dump->external_dbg_cfg_name));
-
- memcpy(dump->external_dbg_cfg_name, fwrt->dump.external_dbg_cfg_name,
- sizeof(dump->external_dbg_cfg_name));
-
- dump->regions_num = trigger->num_regions;
- memcpy(dump->region_ids, trigger->data, reg_ids_size);
+ cfg_name = dump->cfg_names;
+ dump->num_of_cfg_names = cpu_to_le32(num_of_cfg_names);
+ list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
+ struct iwl_fw_ini_debug_info_tlv *debug_info =
+ (void *)node->tlv.data;
+
+ cfg_name->image_type = debug_info->image_type;
+ cfg_name->cfg_name_len =
+ cpu_to_le32(IWL_FW_INI_MAX_CFG_NAME);
+ memcpy(cfg_name->cfg_name, debug_info->debug_cfg_name,
+ sizeof(cfg_name->cfg_name));
+ cfg_name++;
+ }
/* add dump info TLV to the beginning of the list since it needs to be
* the first TLV in the dump
@@ -1794,33 +1981,18 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
[IWL_FW_INI_REGION_INVALID] = {},
- [IWL_FW_INI_REGION_DEVICE_MEMORY] = {
- .get_num_of_ranges = iwl_dump_ini_mem_ranges,
- .get_size = iwl_dump_ini_mem_get_size,
- .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_dev_mem_iter,
- },
- [IWL_FW_INI_REGION_PERIPHERY_MAC] = {
- .get_num_of_ranges = iwl_dump_ini_mem_ranges,
- .get_size = iwl_dump_ini_mem_get_size,
- .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_prph_iter,
+ [IWL_FW_INI_REGION_INTERNAL_BUFFER] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_mon_smem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header,
+ .fill_range = iwl_dump_ini_mon_smem_iter,
},
- [IWL_FW_INI_REGION_PERIPHERY_PHY] = {},
- [IWL_FW_INI_REGION_PERIPHERY_AUX] = {},
[IWL_FW_INI_REGION_DRAM_BUFFER] = {
.get_num_of_ranges = iwl_dump_ini_mon_dram_ranges,
.get_size = iwl_dump_ini_mon_dram_get_size,
.fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header,
.fill_range = iwl_dump_ini_mon_dram_iter,
},
- [IWL_FW_INI_REGION_DRAM_IMR] = {},
- [IWL_FW_INI_REGION_INTERNAL_BUFFER] = {
- .get_num_of_ranges = iwl_dump_ini_mem_ranges,
- .get_size = iwl_dump_ini_mon_smem_get_size,
- .fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header,
- .fill_range = iwl_dump_ini_dev_mem_iter,
- },
[IWL_FW_INI_REGION_TXF] = {
.get_num_of_ranges = iwl_dump_ini_txf_ranges,
.get_size = iwl_dump_ini_txf_get_size,
@@ -1828,70 +2000,91 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_range = iwl_dump_ini_txf_iter,
},
[IWL_FW_INI_REGION_RXF] = {
- .get_num_of_ranges = iwl_dump_ini_rxf_ranges,
+ .get_num_of_ranges = iwl_dump_ini_single_range,
.get_size = iwl_dump_ini_rxf_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
.fill_range = iwl_dump_ini_rxf_iter,
},
- [IWL_FW_INI_REGION_PAGING] = {
+ [IWL_FW_INI_REGION_LMAC_ERROR_TABLE] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_err_table_get_size,
+ .fill_mem_hdr = iwl_dump_ini_err_table_fill_header,
+ .fill_range = iwl_dump_ini_err_table_iter,
+ },
+ [IWL_FW_INI_REGION_UMAC_ERROR_TABLE] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_err_table_get_size,
+ .fill_mem_hdr = iwl_dump_ini_err_table_fill_header,
+ .fill_range = iwl_dump_ini_err_table_iter,
+ },
+ [IWL_FW_INI_REGION_RSP_OR_NOTIF] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_fw_pkt_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .get_num_of_ranges = iwl_dump_ini_paging_ranges,
- .get_size = iwl_dump_ini_paging_get_size,
- .fill_range = iwl_dump_ini_paging_iter,
+ .fill_range = iwl_dump_ini_fw_pkt_iter,
},
- [IWL_FW_INI_REGION_CSR] = {
+ [IWL_FW_INI_REGION_DEVICE_MEMORY] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_csr_iter,
+ .fill_range = iwl_dump_ini_dev_mem_iter,
},
- [IWL_FW_INI_REGION_NOTIFICATION] = {},
- [IWL_FW_INI_REGION_DHC] = {},
- [IWL_FW_INI_REGION_LMAC_ERROR_TABLE] = {
+ [IWL_FW_INI_REGION_PERIPHERY_MAC] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_dev_mem_iter,
+ .fill_range = iwl_dump_ini_prph_iter,
},
- [IWL_FW_INI_REGION_UMAC_ERROR_TABLE] = {
+ [IWL_FW_INI_REGION_PERIPHERY_PHY] = {},
+ [IWL_FW_INI_REGION_PERIPHERY_AUX] = {},
+ [IWL_FW_INI_REGION_PAGING] = {
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .get_num_of_ranges = iwl_dump_ini_paging_ranges,
+ .get_size = iwl_dump_ini_paging_get_size,
+ .fill_range = iwl_dump_ini_paging_iter,
+ },
+ [IWL_FW_INI_REGION_CSR] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_dev_mem_iter,
+ .fill_range = iwl_dump_ini_csr_iter,
},
+ [IWL_FW_INI_REGION_DRAM_IMR] = {},
+ [IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {},
};
static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_trigger *trigger,
+ struct iwl_fwrt_dump_data *dump_data,
struct list_head *list)
{
+ struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
+ struct iwl_dump_ini_region_data reg_data = {
+ .dump_data = dump_data,
+ };
int i;
u32 size = 0;
+ u64 regions_mask = le64_to_cpu(trigger->regions_mask);
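+
+ /* regions_mask is a 64-bit bitmap: bit i selects active_regions[i].
+ * For example, a mask of 0x5 would collect region ids 0 and 2
+ * (illustrative).
+ */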
- for (i = 0; i < le32_to_cpu(trigger->num_regions); i++) {
- u32 reg_id = le32_to_cpu(trigger->data[i]), reg_type;
- struct iwl_fw_ini_region_cfg *reg;
+ for (i = 0; i < 64; i++) {
+ u32 reg_type;
+ struct iwl_fw_ini_region_tlv *reg;
- if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)))
+ if (!(BIT_ULL(i) & regions_mask))
continue;
- reg = fwrt->dump.active_regs[reg_id];
- if (!reg) {
+ reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
+ if (!reg_data.reg_tlv) {
IWL_WARN(fwrt,
- "WRT: Unassigned region id %d, skipping\n",
- reg_id);
+ "WRT: Unassigned region id %d, skipping\n", i);
continue;
}
- /* currently the driver supports always on domain only */
- if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
- continue;
-
- reg_type = le32_to_cpu(reg->region_type);
+ reg = (void *)reg_data.reg_tlv->data;
+ reg_type = le32_to_cpu(reg->type);
if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops))
continue;
- size += iwl_dump_ini_mem(fwrt, list, reg,
+ size += iwl_dump_ini_mem(fwrt, list, &reg_data,
&iwl_dump_ini_region_ops[reg_type]);
}
@@ -1901,31 +2094,43 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
return size;
}
+static bool iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_trigger_tlv *trig)
+{
+ enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point);
+ u32 usec = le32_to_cpu(trig->ignore_consec);
+
+ if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
+ tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
+ tp_id >= IWL_FW_INI_TIME_POINT_NUM ||
+ iwl_fw_dbg_no_trig_window(fwrt, tp_id, usec))
+ return false;
+
+ return true;
+}
+
static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_ini_trigger_id trig_id,
+ struct iwl_fwrt_dump_data *dump_data,
struct list_head *list)
{
+ struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
struct iwl_fw_ini_dump_entry *entry;
struct iwl_fw_ini_dump_file_hdr *hdr;
- struct iwl_fw_ini_trigger *trigger;
u32 size;
- if (!iwl_fw_ini_trigger_on(fwrt, trig_id))
- return 0;
-
- trigger = fwrt->dump.active_trigs[trig_id].trig;
- if (!trigger || !le32_to_cpu(trigger->num_regions))
+ if (!trigger || !iwl_fw_ini_trigger_on(fwrt, trigger) ||
+ !le64_to_cpu(trigger->regions_mask))
return 0;
- entry = kmalloc(sizeof(*entry) + sizeof(*hdr), GFP_KERNEL);
+ entry = vzalloc(sizeof(*entry) + sizeof(*hdr));
if (!entry)
return 0;
entry->size = sizeof(*hdr);
- size = iwl_dump_ini_trigger(fwrt, trigger, list);
+ size = iwl_dump_ini_trigger(fwrt, dump_data, list);
if (!size) {
- kfree(entry);
+ vfree(entry);
return 0;
}
@@ -1991,18 +2196,24 @@ static void iwl_dump_ini_list_free(struct list_head *list)
list_entry(list->next, typeof(*entry), list);
list_del(&entry->list);
- kfree(entry);
+ vfree(entry);
}
}
-static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, u8 wk_idx)
+static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data)
+{
+ dump_data->trig = NULL;
+ kfree(dump_data->fw_pkt);
+ dump_data->fw_pkt = NULL;
+}
+
+static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data)
{
- enum iwl_fw_ini_trigger_id trig_id = fwrt->dump.wks[wk_idx].ini_trig_id;
struct list_head dump_list = LIST_HEAD_INIT(dump_list);
struct scatterlist *sg_dump_data;
- u32 file_len;
+ u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
- file_len = iwl_dump_ini_file_gen(fwrt, trig_id, &dump_list);
if (!file_len)
goto out;
@@ -2023,7 +2234,7 @@ static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_dump_ini_list_free(&dump_list);
out:
- fwrt->dump.wks[wk_idx].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID;
+ iwl_fw_error_dump_data_free(dump_data);
}
const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
@@ -2038,15 +2249,9 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
bool monitor_only,
unsigned int delay)
{
- u32 trig_type = le32_to_cpu(desc->trig_desc.type);
- int ret;
-
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
- ret = iwl_fw_dbg_ini_collect(fwrt, trig_type);
- if (!ret)
- iwl_fw_free_dump_desc(fwrt);
-
- return ret;
+ iwl_fw_free_dump_desc(fwrt);
+ return 0;
}
/* use wks[0] since dump flow prior to ini does not need to support
@@ -2138,35 +2343,29 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
-int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_ini_trigger_id id)
+int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data)
{
- struct iwl_fw_ini_active_triggers *active;
+ struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig;
+ enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point);
u32 occur, delay;
unsigned long idx;
- if (WARN_ON(!iwl_fw_ini_trigger_on(fwrt, id)))
- return -EINVAL;
+ if (test_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
+ return -EBUSY;
- if (!iwl_fw_ini_trigger_on(fwrt, id)) {
+ if (!iwl_fw_ini_trigger_on(fwrt, trig)) {
IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
- id);
+ tp_id);
return -EINVAL;
}
- active = &fwrt->dump.active_trigs[id];
- delay = le32_to_cpu(active->trig->dump_delay);
- occur = le32_to_cpu(active->trig->occurrences);
+ delay = le32_to_cpu(trig->dump_delay);
+ occur = le32_to_cpu(trig->occurrences);
if (!occur)
return 0;
- active->trig->occurrences = cpu_to_le32(--occur);
-
- if (le32_to_cpu(active->trig->force_restart)) {
- IWL_WARN(fwrt, "WRT: Force restart: trigger %d fired.\n", id);
- iwl_force_nmi(fwrt->trans);
- return 0;
- }
+ trig->occurrences = cpu_to_le32(--occur);
/* Check there is an available worker.
* ffz return value is undefined if no zero exists,
@@ -2181,36 +2380,14 @@ int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
return -EBUSY;
- fwrt->dump.wks[idx].ini_trig_id = id;
+ fwrt->dump.wks[idx].dump_data = *dump_data;
- IWL_WARN(fwrt, "WRT: Collecting data: ini trigger %d fired.\n", id);
+ IWL_WARN(fwrt, "WRT: Collecting data: ini trigger %d fired.\n", tp_id);
schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
return 0;
}
-IWL_EXPORT_SYMBOL(_iwl_fw_dbg_ini_collect);
-
-int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id)
-{
- int id;
-
- switch (legacy_trigger_id) {
- case FW_DBG_TRIGGER_FW_ASSERT:
- case FW_DBG_TRIGGER_ALIVE_TIMEOUT:
- case FW_DBG_TRIGGER_DRIVER:
- id = IWL_FW_TRIGGER_ID_FW_ASSERT;
- break;
- case FW_DBG_TRIGGER_USER:
- id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
- break;
- default:
- return -EIO;
- }
-
- return _iwl_fw_dbg_ini_collect(fwrt, id);
-}
-IWL_EXPORT_SYMBOL(iwl_fw_dbg_ini_collect);
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
@@ -2219,6 +2396,9 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
int ret, len = 0;
char buf[64];
+ if (iwl_trans_dbg_ini_valid(fwrt->trans))
+ return 0;
+
if (fmt) {
va_list ap;
@@ -2322,7 +2502,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n");
if (iwl_trans_dbg_ini_valid(fwrt->trans))
- iwl_fw_error_ini_dump(fwrt, wk_idx);
+ iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
else
iwl_fw_error_dump(fwrt);
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n");
@@ -2335,11 +2515,10 @@ out:
void iwl_fw_error_dump_wk(struct work_struct *work)
{
- struct iwl_fw_runtime *fwrt;
- typeof(fwrt->dump.wks[0]) *wks;
-
- wks = container_of(work, typeof(fwrt->dump.wks[0]), wk.work);
- fwrt = container_of(wks, struct iwl_fw_runtime, dump.wks[wks->idx]);
+ struct iwl_fwrt_wk_data *wks =
+ container_of(work, typeof(*wks), wk.work);
+ struct iwl_fw_runtime *fwrt =
+ container_of(wks, typeof(*fwrt), dump.wks[wks->idx]);
/* assumes the op mode mutex is locked in dump_start since
* iwl_fw_dbg_collect_sync can't run in parallel
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index e3b5dd34643f..179f2905d56b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -114,9 +114,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
bool monitor_only, unsigned int delay);
int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig_type);
-int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_ini_trigger_id id);
-int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id);
+int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig, const char *str,
size_t len, struct iwl_fw_dbg_trigger_tlv *trigger);
@@ -222,29 +221,6 @@ _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
_iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \
})
-static inline bool
-iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_ini_trigger_id id)
-{
- struct iwl_fw_ini_trigger *trig;
- u32 usec;
-
- if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
- id == IWL_FW_TRIGGER_ID_INVALID || id >= IWL_FW_TRIGGER_ID_NUM ||
- !fwrt->dump.active_trigs[id].active)
- return false;
-
- trig = fwrt->dump.active_trigs[id].trig;
- usec = le32_to_cpu(trig->ignore_consec);
-
- if (iwl_fw_dbg_no_trig_window(fwrt, id, usec)) {
- IWL_WARN(fwrt, "Trigger %d fired in no-collect window\n", id);
- return false;
- }
-
- return true;
-}
-
static inline void
_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
struct wireless_dev *wdev,
@@ -315,10 +291,8 @@ static inline void iwl_fw_flush_dumps(struct iwl_fw_runtime *fwrt)
int i;
iwl_dbg_tlv_del_timers(fwrt->trans);
- for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) {
+ for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++)
flush_delayed_work(&fwrt->dump.wks[i].wk);
- fwrt->dump.wks[i].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID;
- }
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -381,12 +355,21 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans,
static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
{
- if (iwl_trans_dbg_ini_valid(fwrt->trans) && fwrt->trans->dbg.hw_error) {
- _iwl_fw_dbg_ini_collect(fwrt, IWL_FW_TRIGGER_ID_FW_HW_ERROR);
+ enum iwl_fw_ini_time_point tp_id;
+
+ if (!iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0);
+ return;
+ }
+
+ if (fwrt->trans->dbg.hw_error) {
+ tp_id = IWL_FW_INI_TIME_POINT_FW_HW_ERROR;
fwrt->trans->dbg.hw_error = false;
} else {
- iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0);
+ tp_id = IWL_FW_INI_TIME_POINT_FW_ASSERT;
}
+
+ iwl_dbg_tlv_time_point(fwrt, tp_id, NULL);
}
void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index c1aa4360736b..ca3b1a461dea 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -320,10 +320,45 @@ out:
FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
+static ssize_t iwl_dbgfs_fw_dbg_domain_write(struct iwl_fw_runtime *fwrt,
+ char *buf, size_t count)
+{
+ u32 new_domain;
+ int ret;
+
+ if (!iwl_trans_fw_running(fwrt->trans))
+ return -EIO;
+
+ ret = kstrtou32(buf, 0, &new_domain);
+ if (ret)
+ return ret;
+
+ if (new_domain != fwrt->trans->dbg.domains_bitmap) {
+ ret = iwl_dbg_tlv_gen_active_trigs(fwrt, new_domain);
+ if (ret)
+ return ret;
+
+ iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
+ NULL);
+ }
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
+ size_t size, char *buf)
+{
+ return scnprintf(buf, size, "0x%08x\n",
+ fwrt->trans->dbg.domains_bitmap);
+}
+
+FWRT_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_domain, 20);
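+
+/*
+ * Illustrative usage from userspace; the exact path depends on where
+ * debugfs is mounted and on the device's directory name:
+ *   cat /sys/kernel/debug/iwlwifi/<dev>/iwlmvm/fw_dbg_domain
+ *   echo 0xffffffff > /sys/kernel/debug/iwlwifi/<dev>/iwlmvm/fw_dbg_domain
+ * A write that changes the bitmap regenerates the active trigger list
+ * and fires the periodic time point so the new domains take effect.
+ */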
+
void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
{
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
+ FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0600);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 2e763678dbdb..f008e1bbfdf4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -65,6 +65,7 @@
#define __fw_error_dump_h__
#include <linux/types.h>
+#include "fw/api/cmdhdr.h"
#define IWL_FW_ERROR_DUMP_BARKER 0x14789632
#define IWL_FW_INI_ERROR_DUMP_BARKER 0x14789633
@@ -327,6 +328,7 @@ struct iwl_fw_ini_fifo_hdr {
* @dram_base_addr: base address of dram monitor range
* @page_num: page number of memory range
* @fifo_hdr: fifo header of memory range
+ * @fw_pkt: FW packet header of memory range
* @data: the actual memory
*/
struct iwl_fw_ini_error_dump_range {
@@ -336,6 +338,7 @@ struct iwl_fw_ini_error_dump_range {
__le64 dram_base_addr;
__le32 page_num;
struct iwl_fw_ini_fifo_hdr fifo_hdr;
+ struct iwl_cmd_header fw_pkt_hdr;
};
__le32 data[];
} __packed;
@@ -379,12 +382,23 @@ struct iwl_fw_ini_error_dump_register {
__le32 data;
} __packed;
+/**
+ * struct iwl_fw_ini_dump_cfg_name - configuration name
+ * @image_type: image type the configuration is related to
+ * @cfg_name_len: length of the configuration name
+ * @cfg_name: name of the configuration
+ */
+struct iwl_fw_ini_dump_cfg_name {
+ __le32 image_type;
+ __le32 cfg_name_len;
+ u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME];
+} __packed;
+
/**
* struct iwl_fw_ini_dump_info - ini dump information
* @version: dump version
- * @trigger_id: trigger id that caused the dump collection
- * @trigger_reason: not supported yet
- * @is_external_cfg: 1 if an external debug configuration was loaded
- * and 0 otherwise
+ * @time_point: time point that caused the dump collection
+ * @trigger_reason: reason of the trigger
+ * @external_cfg_state: &enum iwl_ini_cfg_state
* @ver_type: FW version type
* @ver_subtype: FW version subtype
* @hw_step: HW step
@@ -397,22 +411,18 @@ struct iwl_fw_ini_error_dump_register {
* @lmac_minor: lmac minor version
* @umac_major: umac major version
* @umac_minor: umac minor version
+ * @fw_mon_mode: FW monitor mode &enum iwl_fw_ini_buffer_location
+ * @regions_mask: bitmap mask of regions ids in the dump
* @build_tag_len: length of the build tag
* @build_tag: build tag string
- * @img_name_len: length of the FW image name
- * @img_name: FW image name
- * @internal_dbg_cfg_name_len: length of the internal debug configuration name
- * @internal_dbg_cfg_name: internal debug configuration name
- * @external_dbg_cfg_name_len: length of the external debug configuration name
- * @external_dbg_cfg_name: external debug configuration name
- * @regions_num: number of region ids
- * @region_ids: region ids the trigger configured to collect
+ * @num_of_cfg_names: number of configuration name structs
+ * @cfg_names: configuration names
*/
struct iwl_fw_ini_dump_info {
__le32 version;
- __le32 trigger_id;
+ __le32 time_point;
__le32 trigger_reason;
- __le32 is_external_cfg;
+ __le32 external_cfg_state;
__le32 ver_type;
__le32 ver_subtype;
__le32 hw_step;
@@ -425,17 +435,24 @@ struct iwl_fw_ini_dump_info {
__le32 lmac_minor;
__le32 umac_major;
__le32 umac_minor;
+ __le32 fw_mon_mode;
+ __le64 regions_mask;
__le32 build_tag_len;
u8 build_tag[FW_VER_HUMAN_READABLE_SZ];
- __le32 img_name_len;
- u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
- __le32 internal_dbg_cfg_name_len;
- u8 internal_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
- __le32 external_dbg_cfg_name_len;
- u8 external_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
- __le32 regions_num;
- __le32 region_ids[];
+ __le32 num_of_cfg_names;
+ struct iwl_fw_ini_dump_cfg_name cfg_names[];
+} __packed;
+/**
+ * struct iwl_fw_ini_err_table_dump - ini error table dump
+ * @header: header of the region
+ * @version: error table version
+ * @ranges: the memory ranges of this region
+ */
+struct iwl_fw_ini_err_table_dump {
+ struct iwl_fw_ini_error_dump_header header;
+ __le32 version;
+ struct iwl_fw_ini_error_dump_range ranges[];
} __packed;
/**
@@ -457,12 +474,14 @@ struct iwl_fw_error_dump_rb {
* @header: header of the region
* @write_ptr: write pointer position in the buffer
* @cycle_cnt: cycles count
+ * @cur_frag: current fragment in use
* @ranges: the memory ranges of this region
*/
struct iwl_fw_ini_monitor_dump {
struct iwl_fw_ini_error_dump_header header;
__le32 write_ptr;
__le32 cycle_cnt;
+ __le32 cur_frag;
struct iwl_fw_ini_error_dump_range ranges[];
} __packed;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 0d5bc4ce5c07..1554f5fdd483 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -93,7 +93,7 @@ struct iwl_ucode_header {
} u;
};
-#define IWL_UCODE_INI_TLV_GROUP 0x1000000
+#define IWL_UCODE_TLV_DEBUG_BASE 0x1000005
/*
* new TLV uCode file layout
@@ -151,7 +151,6 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
- IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_INI_TLV_GROUP,
IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0,
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_TLV_DEBUG_BASE + 1,
IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_TLV_DEBUG_BASE + 2,
@@ -326,6 +325,8 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG = (__force iwl_ucode_tlv_api_t)56,
IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57,
IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58,
+ IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59,
+
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@@ -449,6 +450,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49,
IWL_UCODE_TLV_CAPA_SET_LTR_GEN2 = (__force iwl_ucode_tlv_capa_t)50,
IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 039576d71276..994880a83652 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -228,18 +228,6 @@ struct iwl_fw_dbg {
};
/**
- * struct iwl_fw_ini_active_triggers
- * @active: is this trigger active
- * @size: allocated memory size of the trigger
- * @trig: trigger
- */
-struct iwl_fw_ini_active_triggers {
- bool active;
- size_t size;
- struct iwl_fw_ini_trigger *trig;
-};
-
-/**
* struct iwl_fw - variables associated with the firmware
*
* @ucode_ver: ucode version from the ucode file
@@ -325,4 +313,22 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
return &fw->img[ucode_type];
}
+static inline u8 iwl_mvm_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd)
+{
+ const struct iwl_fw_cmd_version *entry;
+ unsigned int i;
+
+ if (!fw->ucode_capa.cmd_versions ||
+ !fw->ucode_capa.n_cmd_versions)
+ return IWL_FW_CMD_VER_UNKNOWN;
+
+ entry = fw->ucode_capa.cmd_versions;
+ for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
+ if (entry->group == grp && entry->cmd == cmd)
+ return entry->cmd_ver;
+ }
+
+ return IWL_FW_CMD_VER_UNKNOWN;
+}
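+
+/*
+ * Illustrative call; LONG_GROUP and SCAN_REQ_UMAC are assumed from the
+ * fw API headers. A caller can pick a command payload version at
+ * runtime, e.g.:
+ *
+ *   u8 cmd_ver = iwl_mvm_lookup_cmd_ver(fw, LONG_GROUP, SCAN_REQ_UMAC);
+ *   if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
+ *           cmd_ver = 1;
+ */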
+
#endif /* __iwl_fw_img_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index be436c18a047..c24575ff0e54 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -65,7 +65,11 @@
#include "img.h"
#include "fw/api/debug.h"
#include "fw/api/paging.h"
+#include "fw/api/power.h"
#include "iwl-eeprom-parse.h"
+#include "fw/acpi.h"
+
+#define IWL_FW_DBG_DOMAIN IWL_FW_INI_DOMAIN_ALWAYS_ON
struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
@@ -91,6 +95,27 @@ struct iwl_fwrt_shared_mem_cfg {
#define IWL_FW_RUNTIME_DUMP_WK_NUM 5
/**
+ * struct iwl_fwrt_dump_data - dump data
+ * @trig: trigger the worker was scheduled upon
+ * @fw_pkt: packet received from FW
+ */
+struct iwl_fwrt_dump_data {
+ struct iwl_fw_ini_trigger_tlv *trig;
+ struct iwl_rx_packet *fw_pkt;
+};
+
+/**
+ * struct iwl_fwrt_wk_data - dump worker data struct
+ * @idx: index of the worker
+ * @wk: worker
+ */
+struct iwl_fwrt_wk_data {
+ u8 idx;
+ struct delayed_work wk;
+ struct iwl_fwrt_dump_data dump_data;
+};
+
+/**
* struct iwl_txf_iter_data - Tx fifo iterator data struct
* @fifo: fifo number
* @lmac: lmac number
@@ -105,6 +130,14 @@ struct iwl_txf_iter_data {
};
/**
+ * enum iwl_fw_runtime_status - fw runtime status flags
+ * @STATUS_GEN_ACTIVE_TRIGS: generating active trigger list
+ */
+enum iwl_fw_runtime_status {
+ STATUS_GEN_ACTIVE_TRIGS,
+};
+
+/**
* struct iwl_fw_runtime - runtime data for firmware
* @fw: firmware image
* @cfg: NIC configuration
@@ -117,6 +150,7 @@ struct iwl_txf_iter_data {
* @smem_cfg: saved firmware SMEM configuration
* @cur_fw_img: current firmware image, must be maintained by
* the driver by calling &iwl_fw_set_current_image()
+ * @status: &enum iwl_fw_runtime_status
* @dump: debug dump data
*/
struct iwl_fw_runtime {
@@ -137,33 +171,25 @@ struct iwl_fw_runtime {
/* memory configuration */
struct iwl_fwrt_shared_mem_cfg smem_cfg;
+ unsigned long status;
+
/* debug */
struct {
const struct iwl_fw_dump_desc *desc;
bool monitor_only;
- struct {
- u8 idx;
- enum iwl_fw_ini_trigger_id ini_trig_id;
- struct delayed_work wk;
- } wks[IWL_FW_RUNTIME_DUMP_WK_NUM];
+ struct iwl_fwrt_wk_data wks[IWL_FW_RUNTIME_DUMP_WK_NUM];
unsigned long active_wks;
u8 conf;
/* ts of the beginning of a non-collect fw dbg data period */
- unsigned long non_collect_ts_start[IWL_FW_TRIGGER_ID_NUM];
+ unsigned long non_collect_ts_start[IWL_FW_INI_TIME_POINT_NUM];
u32 *d3_debug_data;
- struct iwl_fw_ini_region_cfg *active_regs[IWL_FW_INI_MAX_REGION_ID];
- struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM];
u32 lmac_err_id[MAX_NUM_LMAC];
u32 umac_err_id;
struct iwl_txf_iter_data txf_iter_data;
- u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
- u8 internal_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
- u8 external_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
-
struct {
u8 type;
u8 subtype;
@@ -179,7 +205,16 @@ struct iwl_fw_runtime {
u32 delay;
u64 seq;
} timestamp;
+ bool tpc_enabled;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
+#ifdef CONFIG_ACPI
+ struct iwl_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
+ u8 sar_chain_a_profile;
+ u8 sar_chain_b_profile;
+ struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
+ u32 geo_rev;
+ struct iwl_ppag_table_cmd ppag_table;
+#endif
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
@@ -194,16 +229,6 @@ static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt)
kfree(fwrt->dump.d3_debug_data);
fwrt->dump.d3_debug_data = NULL;
- for (i = 0; i < IWL_FW_TRIGGER_ID_NUM; i++) {
- struct iwl_fw_ini_active_triggers *active =
- &fwrt->dump.active_trigs[i];
-
- active->active = false;
- active->size = 0;
- kfree(active->trig);
- active->trig = NULL;
- }
-
iwl_dbg_tlv_del_timers(fwrt->trans);
for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++)
cancel_delayed_work_sync(&fwrt->dump.wks[i].wk);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 214495a7165f..59bb960b460a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -88,7 +88,6 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_8000,
IWL_DEVICE_FAMILY_9000,
IWL_DEVICE_FAMILY_22000,
- IWL_DEVICE_FAMILY_22560,
IWL_DEVICE_FAMILY_AX210,
};
@@ -360,6 +359,28 @@ struct iwl_cfg_trans_params {
};
/**
+ * struct iwl_fw_mon_reg - FW monitor register info
+ * @addr: register address
+ * @mask: register mask
+ */
+struct iwl_fw_mon_reg {
+ u32 addr;
+ u32 mask;
+};
+
+/**
+ * struct iwl_fw_mon_regs - FW monitor registers
+ * @write_ptr: write pointer register
+ * @cycle_cnt: cycle count register
+ * @cur_frag: current fragment in use
+ */
+struct iwl_fw_mon_regs {
+ struct iwl_fw_mon_reg write_ptr;
+ struct iwl_fw_mon_reg cycle_cnt;
+ struct iwl_fw_mon_reg cur_frag;
+};
+
+/**
* struct iwl_cfg
* @trans: the trans-specific configuration part
* @name: Official name of the device
@@ -471,12 +492,10 @@ struct iwl_cfg {
u32 d3_debug_data_base_addr;
u32 d3_debug_data_length;
u32 min_txq_size;
- u32 fw_mon_smem_write_ptr_addr;
- u32 fw_mon_smem_write_ptr_msk;
- u32 fw_mon_smem_cycle_cnt_ptr_addr;
- u32 fw_mon_smem_cycle_cnt_ptr_msk;
u32 gp2_reg_addr;
u32 min_256_ba_txq_size;
+ const struct iwl_fw_mon_regs mon_dram_regs;
+ const struct iwl_fw_mon_regs mon_smem_regs;
};
extern const struct iwl_csr_params iwl_csr_v1;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 695bbaa86273..92d9898ab7c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -603,9 +603,7 @@ enum msix_fh_int_causes {
enum msix_hw_int_causes {
MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
- MSIX_HW_INT_CAUSES_REG_IPC = BIT(1),
MSIX_HW_INT_CAUSES_REG_IML = BIT(2),
- MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5),
MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 3d7f8ff8ef58..f266647dc08c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -95,6 +95,20 @@ struct iwl_dbg_tlv_ver_data {
int max_ver;
};
+/**
+ * struct iwl_dbg_tlv_timer_node - timer node struct
+ * @list: list of &struct iwl_dbg_tlv_timer_node
+ * @timer: timer
+ * @fwrt: &struct iwl_fw_runtime
+ * @tlv: TLV attached to the timer node
+ */
+struct iwl_dbg_tlv_timer_node {
+ struct list_head list;
+ struct timer_list timer;
+ struct iwl_fw_runtime *fwrt;
+ struct iwl_ucode_tlv *tlv;
+};
+
static const struct iwl_dbg_tlv_ver_data
dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,},
@@ -104,12 +118,27 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
};
+static int iwl_dbg_tlv_add(struct iwl_ucode_tlv *tlv, struct list_head *list)
+{
+ u32 len = le32_to_cpu(tlv->length);
+ struct iwl_dbg_tlv_node *node;
+
+ node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
+ list_add_tail(&node->list, list);
+
+ return 0;
+}
+
static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
- u32 ver = le32_to_cpu(hdr->tlv_version);
+ u32 ver = le32_to_cpu(hdr->version);
if (ver < dbg_ver_table[tlv_idx].min_ver ||
ver > dbg_ver_table[tlv_idx].max_ver)
@@ -118,27 +147,169 @@ static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
return true;
}
+static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_debug_info_tlv *debug_info = (void *)tlv->data;
+
+ if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
+ return -EINVAL;
+
+ IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
+ debug_info->debug_cfg_name);
+
+ return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
+}
+
+static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_allocation_tlv *alloc = (void *)tlv->data;
+ u32 buf_location = le32_to_cpu(alloc->buf_location);
+ u32 alloc_id = le32_to_cpu(alloc->alloc_id);
+
+ if (le32_to_cpu(tlv->length) != sizeof(*alloc) ||
+ (buf_location != IWL_FW_INI_LOCATION_SRAM_PATH &&
+ buf_location != IWL_FW_INI_LOCATION_DRAM_PATH))
+ return -EINVAL;
+
+ if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
+ alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) ||
+ (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
+ (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
+ alloc_id >= IWL_FW_INI_ALLOCATION_NUM))) {
+ IWL_ERR(trans,
+ "WRT: Invalid allocation id %u for allocation TLV\n",
+ alloc_id);
+ return -EINVAL;
+ }
+
+ trans->dbg.fw_mon_cfg[alloc_id] = *alloc;
+
+ return 0;
+}
+
+static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)tlv->data;
+ u32 tp = le32_to_cpu(hcmd->time_point);
+
+ if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
+ return -EINVAL;
+
+ /* Host commands cannot be sent at the early time point since the FW
+ * is not ready yet
+ */
+ if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
+ tp >= IWL_FW_INI_TIME_POINT_NUM ||
+ tp == IWL_FW_INI_TIME_POINT_EARLY) {
+ IWL_ERR(trans,
+ "WRT: Invalid time point %u for host command TLV\n",
+ tp);
+ return -EINVAL;
+ }
+
+ return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
+}
+
+static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)tlv->data;
+ struct iwl_ucode_tlv **active_reg;
+ u32 id = le32_to_cpu(reg->id);
+ u32 type = le32_to_cpu(reg->type);
+ u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
+
+ if (le32_to_cpu(tlv->length) < sizeof(*reg))
+ return -EINVAL;
+
+ if (id >= IWL_FW_INI_MAX_REGION_ID) {
+ IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
+ return -EINVAL;
+ }
+
+ if (type <= IWL_FW_INI_REGION_INVALID ||
+ type >= IWL_FW_INI_REGION_NUM) {
+ IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
+ return -EINVAL;
+ }
+
+ active_reg = &trans->dbg.active_regions[id];
+ if (*active_reg) {
+ IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
+
+ kfree(*active_reg);
+ }
+
+ *active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
+ if (!*active_reg)
+ return -ENOMEM;
+
+ IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);
+
+ return 0;
+}
+
+static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
+ u32 tp = le32_to_cpu(trig->time_point);
+
+ if (le32_to_cpu(tlv->length) < sizeof(*trig))
+ return -EINVAL;
+
+ if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
+ tp >= IWL_FW_INI_TIME_POINT_NUM) {
+ IWL_ERR(trans,
+ "WRT: Invalid time point %u for trigger TLV\n",
+ tp);
+ return -EINVAL;
+ }
+
+ if (!le32_to_cpu(trig->occurrences))
+ trig->occurrences = cpu_to_le32(-1);
+
+ return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
+}
+
+static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv) = {
+ [IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info,
+ [IWL_DBG_TLV_TYPE_BUF_ALLOC] = iwl_dbg_tlv_alloc_buf_alloc,
+ [IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd,
+ [IWL_DBG_TLV_TYPE_REGION] = iwl_dbg_tlv_alloc_region,
+ [IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger,
+};
+
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
bool ext)
{
struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
- u32 pnt = le32_to_cpu(hdr->apply_point);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
enum iwl_ini_cfg_state *cfg_state = ext ?
&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
+ int ret;
- IWL_DEBUG_FW(trans, "WRT: read TLV 0x%x, apply point %d\n",
- type, pnt);
-
- if (tlv_idx >= IWL_DBG_TLV_TYPE_NUM) {
- IWL_ERR(trans, "WRT: Unsupported TLV 0x%x\n", type);
+ if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
+ IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
goto out_err;
}
if (!iwl_dbg_tlv_ver_support(tlv)) {
IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
- le32_to_cpu(hdr->tlv_version));
+ le32_to_cpu(hdr->version));
+ goto out_err;
+ }
+
+ ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
+ if (ret) {
+ IWL_ERR(trans,
+ "WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
+ type, ret, ext);
goto out_err;
}
@@ -153,13 +324,91 @@ out_err:
void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
{
- /* will be used later */
+ struct list_head *timer_list = &trans->dbg.periodic_trig_list;
+ struct iwl_dbg_tlv_timer_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, timer_list, list) {
+ del_timer(&node->timer);
+ list_del(&node->list);
+ kfree(node);
+ }
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);
+static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
+ enum iwl_fw_ini_allocation_id alloc_id)
+{
+ struct iwl_fw_mon *fw_mon;
+ int i;
+
+ if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
+ alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
+ return;
+
+ fw_mon = &trans->dbg.fw_mon_ini[alloc_id];
+
+ for (i = 0; i < fw_mon->num_frags; i++) {
+ struct iwl_dram_data *frag = &fw_mon->frags[i];
+
+ dma_free_coherent(trans->dev, frag->size, frag->block,
+ frag->physical);
+
+ frag->physical = 0;
+ frag->block = NULL;
+ frag->size = 0;
+ }
+
+ kfree(fw_mon->frags);
+ fw_mon->frags = NULL;
+ fw_mon->num_frags = 0;
+}
+
void iwl_dbg_tlv_free(struct iwl_trans *trans)
{
- /* will be used again later */
+ struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
+ int i;
+
+ iwl_dbg_tlv_del_timers(trans);
+
+ for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
+ struct iwl_ucode_tlv **active_reg =
+ &trans->dbg.active_regions[i];
+
+ kfree(*active_reg);
+ *active_reg = NULL;
+ }
+
+ list_for_each_entry_safe(tlv_node, tlv_node_tmp,
+ &trans->dbg.debug_info_tlv_list, list) {
+ list_del(&tlv_node->list);
+ kfree(tlv_node);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
+ struct iwl_dbg_tlv_time_point_data *tp =
+ &trans->dbg.time_point[i];
+
+ list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
+ list) {
+ list_del(&tlv_node->list);
+ kfree(tlv_node);
+ }
+
+ list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
+ list) {
+ list_del(&tlv_node->list);
+ kfree(tlv_node);
+ }
+
+ list_for_each_entry_safe(tlv_node, tlv_node_tmp,
+ &tp->active_trig_list, list) {
+ list_del(&tlv_node->list);
+ kfree(tlv_node);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
+ iwl_dbg_tlv_fragments_free(trans, i);
}
static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
@@ -196,7 +445,7 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
if (!iwlwifi_mod_params.enable_ini)
return;
- res = request_firmware(&fw, "iwl-dbg-tlv.ini", dev);
+ res = request_firmware(&fw, "iwl-debug-yoyo.bin", dev);
if (res)
return;
@@ -205,10 +454,628 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
release_firmware(fw);
}
+void iwl_dbg_tlv_init(struct iwl_trans *trans)
+{
+ int i;
+
+ INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
+ INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);
+
+ for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
+ struct iwl_dbg_tlv_time_point_data *tp =
+ &trans->dbg.time_point[i];
+
+ INIT_LIST_HEAD(&tp->trig_list);
+ INIT_LIST_HEAD(&tp->hcmd_list);
+ INIT_LIST_HEAD(&tp->active_trig_list);
+ }
+}
+
+static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
+ struct iwl_dram_data *frag, u32 pages)
+{
+ void *block = NULL;
+ dma_addr_t physical;
+
+ if (!frag || frag->size || !pages)
+ return -EIO;
+
+ while (pages) {
+ block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
+ &physical,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (block)
+ break;
+
+ IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
+ pages * PAGE_SIZE);
+
+ pages = DIV_ROUND_UP(pages, 2);
+ }
+
+ if (!block)
+ return -ENOMEM;
+
+ frag->physical = physical;
+ frag->block = block;
+ frag->size = pages * PAGE_SIZE;
+
+ return pages;
+}
+
+static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_allocation_id alloc_id)
+{
+ struct iwl_fw_mon *fw_mon;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
+ u32 num_frags, remain_pages, frag_pages;
+ int i;
+
+ if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
+ alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
+ return -EIO;
+
+ fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ if (fw_mon->num_frags ||
+ fw_mon_cfg->buf_location !=
+ cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
+ return 0;
+
+ num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
+ if (!fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
+ if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
+ return -EIO;
+ num_frags = 1;
+ }
+
+ remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
+ PAGE_SIZE);
+ num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
+ num_frags = min_t(u32, num_frags, remain_pages);
+ frag_pages = DIV_ROUND_UP(remain_pages, num_frags);
+
+ fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
+ if (!fw_mon->frags)
+ return -ENOMEM;
+
+ for (i = 0; i < num_frags; i++) {
+ int pages = min_t(u32, frag_pages, remain_pages);
+
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
+ alloc_id, i, pages * PAGE_SIZE);
+
+ pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
+ pages);
+ if (pages < 0) {
+ u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
+ (remain_pages * PAGE_SIZE);
+
+ if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
+ iwl_dbg_tlv_fragments_free(fwrt->trans,
+ alloc_id);
+ return pages;
+ }
+ break;
+ }
+
+ remain_pages -= pages;
+ fw_mon->num_frags++;
+ }
+
+ return 0;
+}
+
+static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_allocation_id alloc_id)
+{
+ struct iwl_fw_mon *fw_mon;
+ u32 remain_frags, num_commands;
+ int i, fw_mon_idx = 0;
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
+ return 0;
+
+ if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
+ alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
+ return -EIO;
+
+ if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
+ IWL_FW_INI_LOCATION_DRAM_PATH)
+ return 0;
+
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ /* the first fragment of DBGC1 is given to the FW via register
+ * or context info
+ */
+ if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
+ fw_mon_idx++;
+
+ remain_frags = fw_mon->num_frags - fw_mon_idx;
+ if (!remain_frags)
+ return 0;
+
+ num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);
+
+ IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
+ alloc_id);
+
+ for (i = 0; i < num_commands; i++) {
+ u32 num_frags = min_t(u32, remain_frags,
+ BUF_ALLOC_MAX_NUM_FRAGS);
+ struct iwl_buf_alloc_cmd data = {
+ .alloc_id = cpu_to_le32(alloc_id),
+ .num_frags = cpu_to_le32(num_frags),
+ .buf_location =
+ cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
+ .data[0] = &data,
+ .len[0] = sizeof(data),
+ };
+ int ret, j;
+
+ for (j = 0; j < num_frags; j++) {
+ struct iwl_buf_alloc_frag *frag = &data.frags[j];
+ struct iwl_dram_data *fw_mon_frag =
+ &fw_mon->frags[fw_mon_idx++];
+
+ frag->addr = cpu_to_le64(fw_mon_frag->physical);
+ frag->size = cpu_to_le32(fw_mon_frag->size);
+ }
+ ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+ if (ret)
+ return ret;
+
+ remain_frags -= num_frags;
+ }
+
+ return 0;
+}
+
+static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
+{
+ int ret, i;
+
+ for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
+ ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
+ if (ret)
+ IWL_WARN(fwrt,
+ "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
+ i, ret);
+ }
+}
+
+static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
+ struct list_head *hcmd_list)
+{
+ struct iwl_dbg_tlv_node *node;
+
+ list_for_each_entry(node, hcmd_list, list) {
+ struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
+ struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
+ u32 domain = le32_to_cpu(hcmd->hdr.domain);
+ u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(hcmd_data->group, hcmd_data->id),
+ .len = { hcmd_len, },
+ .data = { hcmd_data->data, },
+ };
+
+ if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
+ !(domain & fwrt->trans->dbg.domains_bitmap))
+ continue;
+
+ iwl_trans_send_cmd(fwrt->trans, &cmd);
+ }
+}
+
+static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
+{
+ struct iwl_dbg_tlv_timer_node *timer_node =
+ from_timer(timer_node, t, timer);
+ struct iwl_fwrt_dump_data dump_data = {
+ .trig = (void *)timer_node->tlv->data,
+ };
+ int ret;
+
+ ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data);
+ if (!ret || ret == -EBUSY) {
+ u32 occur = le32_to_cpu(dump_data.trig->occurrences);
+ u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);
+
+ if (!occur)
+ return;
+
+ mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
+ }
+}
+
+static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_dbg_tlv_node *node;
+ struct list_head *trig_list =
+ &fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;
+
+ list_for_each_entry(node, trig_list, list) {
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
+ struct iwl_dbg_tlv_timer_node *timer_node;
+ u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
+ u32 min_interval = 100;
+
+ if (!occur)
+ continue;
+
+ /* make sure there is at least one dword of data for the
+ * interval value
+ */
+ if (le32_to_cpu(node->tlv.length) <
+ sizeof(*trig) + sizeof(__le32)) {
+ IWL_ERR(fwrt,
+ "WRT: Invalid periodic trigger data was not given\n");
+ continue;
+ }
+
+ if (le32_to_cpu(trig->data[0]) < min_interval) {
+ IWL_WARN(fwrt,
+ "WRT: Override min interval from %u to %u msec\n",
+ le32_to_cpu(trig->data[0]), min_interval);
+ trig->data[0] = cpu_to_le32(min_interval);
+ }
+
+ collect_interval = le32_to_cpu(trig->data[0]);
+
+ timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
+ if (!timer_node) {
+ IWL_ERR(fwrt,
+ "WRT: Failed to allocate periodic trigger\n");
+ continue;
+ }
+
+ timer_node->fwrt = fwrt;
+ timer_node->tlv = &node->tlv;
+ timer_setup(&timer_node->timer,
+ iwl_dbg_tlv_periodic_trig_handler, 0);
+
+ list_add_tail(&timer_node->list,
+ &fwrt->trans->dbg.periodic_trig_list);
+
+ IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");
+
+ mod_timer(&timer_node->timer,
+ jiffies + msecs_to_jiffies(collect_interval));
+ }
+}
+
+static bool is_trig_data_contained(struct iwl_ucode_tlv *new,
+ struct iwl_ucode_tlv *old)
+{
+ struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new->data;
+ struct iwl_fw_ini_trigger_tlv *old_trig = (void *)old->data;
+ __le32 *new_data = new_trig->data, *old_data = old_trig->data;
+ u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
+ u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
+ int i, j;
+
+ for (i = 0; i < new_dwords_num; i++) {
+ bool match = false;
+
+ for (j = 0; j < old_dwords_num; j++) {
+ if (new_data[i] == old_data[j]) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+
+ return true;
+}
+
+static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
+ struct iwl_ucode_tlv *trig_tlv,
+ struct iwl_dbg_tlv_node *node)
+{
+ struct iwl_ucode_tlv *node_tlv = &node->tlv;
+ struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
+ u32 policy = le32_to_cpu(trig->apply_policy);
+ u32 size = le32_to_cpu(trig_tlv->length);
+ u32 trig_data_len = size - sizeof(*trig);
+ u32 offset = 0;
+
+ if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
+ u32 data_len = le32_to_cpu(node_tlv->length) -
+ sizeof(*node_trig);
+
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Appending trigger data (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ offset += data_len;
+ size += data_len;
+ } else {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Overriding trigger data (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+ }
+
+ if (size != le32_to_cpu(node_tlv->length)) {
+ struct list_head *prev = node->list.prev;
+ struct iwl_dbg_tlv_node *tmp;
+
+ list_del(&node->list);
+
+ tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
+ if (!tmp) {
+ IWL_WARN(fwrt,
+ "WRT: No memory to override trigger (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ list_add(&node->list, prev);
+
+ return -ENOMEM;
+ }
+
+ list_add(&tmp->list, prev);
+ node_tlv = &tmp->tlv;
+ node_trig = (void *)node_tlv->data;
+ }
+
+ memcpy(node_trig->data + offset, trig->data, trig_data_len);
+ node_tlv->length = cpu_to_le32(size);
+
+ if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Overriding trigger configuration (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ /* the first 11 dwords are configuration related */
+ memcpy(node_trig, trig, sizeof(__le32) * 11);
+ }
+
+ if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Overriding trigger regions (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ node_trig->regions_mask = trig->regions_mask;
+ } else {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Appending trigger regions (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ node_trig->regions_mask |= trig->regions_mask;
+ }
+
+ return 0;
+}
+
+static int
+iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
+ struct list_head *trig_list,
+ struct iwl_ucode_tlv *trig_tlv)
+{
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
+ struct iwl_dbg_tlv_node *node, *match = NULL;
+ u32 policy = le32_to_cpu(trig->apply_policy);
+
+ list_for_each_entry(node, trig_list, list) {
+ if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
+ break;
+
+ if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
+ is_trig_data_contained(trig_tlv, &node->tlv)) {
+ match = node;
+ break;
+ }
+ }
+
+ if (!match) {
+ IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+ return iwl_dbg_tlv_add(trig_tlv, trig_list);
+ }
+
+ return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
+}
+
+static void
+iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
+ struct iwl_dbg_tlv_time_point_data *tp)
+{
+ struct iwl_dbg_tlv_node *node, *tmp;
+ struct list_head *trig_list = &tp->trig_list;
+ struct list_head *active_trig_list = &tp->active_trig_list;
+
+ list_for_each_entry_safe(node, tmp, active_trig_list, list) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ list_for_each_entry(node, trig_list, list) {
+ struct iwl_ucode_tlv *tlv = &node->tlv;
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
+ u32 domain = le32_to_cpu(trig->hdr.domain);
+
+ if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
+ !(domain & fwrt->trans->dbg.domains_bitmap))
+ continue;
+
+ iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
+ }
+}
+
+int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain)
+{
+ int i;
+
+ if (test_and_set_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
+ return -EBUSY;
+
+ iwl_fw_flush_dumps(fwrt);
+
+ fwrt->trans->dbg.domains_bitmap = new_domain;
+
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Generating active triggers list, domain 0x%x\n",
+ fwrt->trans->dbg.domains_bitmap);
+
+ for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
+ struct iwl_dbg_tlv_time_point_data *tp =
+ &fwrt->trans->dbg.time_point[i];
+
+ iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
+ }
+
+ clear_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status);
+
+ return 0;
+}
+
+static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data,
+ union iwl_dbg_tlv_tp_data *tp_data,
+ u32 trig_data)
+{
+ struct iwl_rx_packet *pkt = tp_data->fw_pkt;
+ struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;
+
+ if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) ||
+ (pkt->hdr.cmd == wanted_hdr->cmd &&
+ pkt->hdr.group_id == wanted_hdr->group_id))) {
+ struct iwl_rx_packet *fw_pkt =
+ kmemdup(pkt,
+ sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
+ GFP_ATOMIC);
+
+ if (!fw_pkt)
+ return false;
+
+ dump_data->fw_pkt = fw_pkt;
+
+ return true;
+ }
+
+ return false;
+}
+
+static int
+iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt,
+ struct list_head *active_trig_list,
+ union iwl_dbg_tlv_tp_data *tp_data,
+ bool (*data_check)(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data,
+ union iwl_dbg_tlv_tp_data *tp_data,
+ u32 trig_data))
+{
+ struct iwl_dbg_tlv_node *node;
+
+ list_for_each_entry(node, active_trig_list, list) {
+ struct iwl_fwrt_dump_data dump_data = {
+ .trig = (void *)node->tlv.data,
+ };
+ u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
+ data);
+ int ret, i;
+
+ if (!num_data) {
+ ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < num_data; i++) {
+ if (!data_check ||
+ data_check(fwrt, &dump_data, tp_data,
+ le32_to_cpu(dump_data.trig->data[i]))) {
+ ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
+ if (ret)
+ return ret;
+
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
+{
+ enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
+ int ret, i;
+
+ iwl_dbg_tlv_gen_active_trigs(fwrt, IWL_FW_DBG_DOMAIN);
+
+ *ini_dest = IWL_FW_INI_LOCATION_INVALID;
+ for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
+ &fwrt->trans->dbg.fw_mon_cfg[i];
+ u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);
+
+ if (dest == IWL_FW_INI_LOCATION_INVALID)
+ continue;
+
+ if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
+ *ini_dest = dest;
+
+ if (dest != *ini_dest)
+ continue;
+
+ ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);
+ if (ret)
+ IWL_WARN(fwrt,
+ "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
+ i, ret);
+ }
+}
+
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data)
{
- /* will be used later */
+ struct list_head *hcmd_list, *trig_list;
+
+ if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
+ tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
+ tp_id >= IWL_FW_INI_TIME_POINT_NUM)
+ return;
+
+ hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
+ trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;
+
+ switch (tp_id) {
+ case IWL_FW_INI_TIME_POINT_EARLY:
+ iwl_dbg_tlv_init_cfg(fwrt);
+ iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
+ break;
+ case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
+ iwl_dbg_tlv_apply_buffers(fwrt);
+ iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
+ break;
+ case IWL_FW_INI_TIME_POINT_PERIODIC:
+ iwl_dbg_tlv_set_periodic_trigs(fwrt);
+ iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ break;
+ case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
+ case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
+ iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data,
+ iwl_dbg_tlv_check_fw_pkt);
+ break;
+ default:
+ iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
+ break;
+ }
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point);
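The fragment allocator in iwl_dbg_tlv_alloc_fragment() above degrades gracefully under memory pressure: when a contiguous DMA allocation fails, it halves the requested page count (rounding up) and retries, settling for the largest block it can get. Below is a minimal user-space sketch of the same backoff idea, assuming a hypothetical try_alloc() in place of dma_alloc_coherent(); unlike the driver loop, it bails out explicitly after a single-page failure so it always terminates.

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-in for dma_alloc_coherent(). */
	static void *try_alloc(size_t size)
	{
		return malloc(size);
	}

	/* Grab the largest block available, halving the request on failure.
	 * On success, *pages holds the page count actually allocated. */
	static void *alloc_with_backoff(size_t *pages, size_t page_size)
	{
		while (*pages) {
			void *block = try_alloc(*pages * page_size);

			if (block)
				return block;

			fprintf(stderr, "failed to allocate %zu pages, halving\n",
				*pages);

			if (*pages == 1)
				break;	/* assumption: give up rather than spin */
			*pages = (*pages + 1) / 2;	/* DIV_ROUND_UP(*pages, 2) */
		}

		return NULL;
	}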
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
index e257ad358c94..f18946872569 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
@@ -65,11 +65,11 @@
#include <linux/types.h>
/**
- * struct iwl_apply_point_data
- * @list: list to go through the TLVs of the apply point
- * @tlv: a debug TLV
+ * struct iwl_dbg_tlv_node - debug TLV node
+ * @list: list of &struct iwl_dbg_tlv_node
+ * @tlv: debug TLV
*/
-struct iwl_apply_point_data {
+struct iwl_dbg_tlv_node {
struct list_head list;
struct iwl_ucode_tlv tlv;
};
@@ -82,6 +82,18 @@ union iwl_dbg_tlv_tp_data {
struct iwl_rx_packet *fw_pkt;
};
+/**
+ * struct iwl_dbg_tlv_time_point_data
+ * @trig_list: list of triggers
+ * @active_trig_list: list of active triggers
+ * @hcmd_list: list of host commands
+ */
+struct iwl_dbg_tlv_time_point_data {
+ struct list_head trig_list;
+ struct list_head active_trig_list;
+ struct list_head hcmd_list;
+};
+
struct iwl_trans;
struct iwl_fw_runtime;
@@ -89,9 +101,11 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans);
void iwl_dbg_tlv_free(struct iwl_trans *trans);
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
bool ext);
+void iwl_dbg_tlv_init(struct iwl_trans *trans);
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data);
+int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain);
void iwl_dbg_tlv_del_timers(struct iwl_trans *trans);
#endif /* __iwl_dbg_tlv_h__*/
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index ff0519ea00a5..4096ccf58b07 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1560,6 +1560,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
drv->fw.fw_version, op->name);
+ iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
+
/* add this device to the list of devices using this op_mode */
list_add_tail(&drv->list, &op->drv);
@@ -1636,8 +1638,6 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
init_completion(&drv->request_firmware_complete);
INIT_LIST_HEAD(&drv->list);
- iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* Create the device debugfs entries. */
drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
@@ -1804,7 +1804,7 @@ MODULE_PARM_DESC(11n_disable,
"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444);
MODULE_PARM_DESC(amsdu_size,
- "amsdu size 0: 12K for multi Rx queue devices, 2K for 22560 devices, "
+ "amsdu size 0: 12K for multi Rx queue devices, 2K for AX210 devices, "
"4K for other devices 1:4K 2:8K 3:12K 4: 2K (default 0)");
module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 05c1c77c88a0..8836f85afe85 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -768,7 +768,7 @@ struct iwlagn_scd_bc_tbl {
/**
* struct iwl_gen3_bc_tbl - scheduler byte count table gen3
- * For 22560 and on:
+ * For AX210 and on:
* @tfd_offset: 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
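The @tfd_offset comment above packs three fields into one 16-bit entry: a byte count in the low bits, a 64-byte-chunk count above it, and reserved bits at the top. As a rough sketch, the helpers below pack and unpack that layout; the macro and function names are hypothetical, and the field widths (12-bit byte count in bits 0-11, 2-bit chunk count in bits 12-13) are read off the comment rather than taken from the driver.

	#include <stdint.h>

	#define BC_BYTE_CNT_MSK		0x0fff	/* bits 0-11: tx command byte count */
	#define BC_NUM_CHUNKS_POS	12	/* bits 12-13: number of 64-byte chunks */
	#define BC_NUM_CHUNKS_MSK	(0x3 << BC_NUM_CHUNKS_POS)

	static inline uint16_t bc_tbl_pack(uint16_t byte_cnt, uint16_t chunks)
	{
		return (byte_cnt & BC_BYTE_CNT_MSK) |
		       ((chunks << BC_NUM_CHUNKS_POS) & BC_NUM_CHUNKS_MSK);
	}

	static inline uint16_t bc_tbl_byte_cnt(uint16_t tfd_offset)
	{
		return tfd_offset & BC_BYTE_CNT_MSK;
	}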
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index c8972f6e38ba..1e240a2a8329 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -156,11 +156,10 @@ static const u16 iwl_uhb_nvm_channels[] = {
96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
149, 153, 157, 161, 165, 169, 173, 177, 181,
/* 6-7 GHz */
- 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233, 237, 241,
- 245, 249, 253, 257, 261, 265, 269, 273, 277, 281, 285, 289, 293, 297,
- 301, 305, 309, 313, 317, 321, 325, 329, 333, 337, 341, 345, 349, 353,
- 357, 361, 365, 369, 373, 377, 381, 385, 389, 393, 397, 401, 405, 409,
- 413, 417, 421
+ 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
+ 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
+ 133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
+ 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};
#define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
@@ -256,12 +255,12 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
#undef CHECK_AND_PRINT_I
}
-static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
+static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
u32 nvm_flags, const struct iwl_cfg *cfg)
{
u32 flags = IEEE80211_CHAN_NO_HT40;
- if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+ if (band == NL80211_BAND_2GHZ && (nvm_flags & NVM_CHANNEL_40MHZ)) {
if (ch_num <= LAST_2GHZ_HT_PLUS)
flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
if (ch_num >= FIRST_2GHZ_HT_MINUS)
@@ -299,6 +298,13 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
return flags;
}
+static enum nl80211_band iwl_nl80211_band_from_channel_idx(int ch_idx)
+{
+ if (ch_idx >= NUM_2GHZ_CHANNELS)
+ return NL80211_BAND_5GHZ;
+ return NL80211_BAND_2GHZ;
+}
+
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const void * const nvm_ch_flags,
@@ -308,7 +314,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
int n_channels = 0;
struct ieee80211_channel *channel;
u32 ch_flags;
- int num_of_ch, num_2ghz_channels = NUM_2GHZ_CHANNELS;
+ int num_of_ch;
const u16 *nvm_chan;
if (cfg->uhb_supported) {
@@ -323,7 +329,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
- bool is_5ghz = (ch_idx >= num_2ghz_channels);
+ enum nl80211_band band =
+ iwl_nl80211_band_from_channel_idx(ch_idx);
if (v4)
ch_flags =
@@ -332,12 +339,13 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
ch_flags =
__le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx);
- if (is_5ghz && !data->sku_cap_band_52ghz_enable)
+ if (band == NL80211_BAND_5GHZ &&
+ !data->sku_cap_band_52ghz_enable)
continue;
/* workaround to disable wide channels in 5GHz */
if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) &&
- is_5ghz) {
+ band == NL80211_BAND_5GHZ) {
ch_flags &= ~(NVM_CHANNEL_40MHZ |
NVM_CHANNEL_80MHZ |
NVM_CHANNEL_160MHZ);
@@ -362,8 +370,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
n_channels++;
channel->hw_value = nvm_chan[ch_idx];
- channel->band = is_5ghz ?
- NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
+ channel->band = band;
channel->center_freq =
ieee80211_channel_to_frequency(
channel->hw_value, channel->band);
@@ -379,7 +386,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
/* don't put limitations in case we're using LAR */
if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR))
channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
- ch_idx, is_5ghz,
+ ch_idx, band,
ch_flags, cfg);
else
channel->flags = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 23c25a7665f2..14c8ba23f3b9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -374,6 +374,7 @@
#define DBGC_CUR_DBGBUF_STATUS (0xd03c1c)
#define DBGC_DBGBUF_WRAP_AROUND (0xd03c2c)
#define DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK (0x00ffffff)
+#define DBGC_CUR_DBGBUF_STATUS_IDX_MSK (0x0f000000)
#define MON_DMARB_RD_CTL_ADDR (0xa03c60)
#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
@@ -381,6 +382,12 @@
#define DBGC_IN_SAMPLE (0xa03c00)
#define DBGC_OUT_CTRL (0xa03c0c)
+/* M2S registers */
+#define LDBG_M2S_BUF_WPTR (0xa0476c)
+#define LDBG_M2S_BUF_WRAP_CNT (0xa04774)
+#define LDBG_M2S_BUF_WPTR_VAL_MSK (0x000fffff)
+#define LDBG_M2S_BUF_WRAP_CNT_VAL_MSK (0x000fffff)
+
/* enable the ID buf for read */
#define WFPM_PS_CTL_CLR 0xA0300C
#define WFMP_MAC_ADDR_0 0xA03080
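The new DBGC_CUR_DBGBUF_STATUS_IDX_MSK (0x0f000000) carves a buffer-index field out of the same status register whose low 24 bits hold the write offset (DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK). A minimal sketch of how such mask/shift pairs are typically consumed; the helper names are hypothetical and the shift count is derived from the mask value, not quoted from the driver.

	#include <stdint.h>

	#define DBGBUF_STATUS_OFFSET_MSK	0x00ffffff	/* bits 0-23: write offset */
	#define DBGBUF_STATUS_IDX_MSK		0x0f000000	/* bits 24-27: buffer index */

	static inline uint32_t dbgbuf_status_offset(uint32_t status)
	{
		return status & DBGBUF_STATUS_OFFSET_MSK;
	}

	static inline uint32_t dbgbuf_status_idx(uint32_t status)
	{
		return (status & DBGBUF_STATUS_IDX_MSK) >> 24;
	}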
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index a31408188ed0..8cadad7364ac 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -679,6 +679,16 @@ struct iwl_dram_data {
};
/**
+ * struct iwl_fw_mon - fw monitor per allocation id
+ * @num_frags: number of fragments
+ * @frags: an array of DRAM buffer fragments
+ */
+struct iwl_fw_mon {
+ u32 num_frags;
+ struct iwl_dram_data *frags;
+};
+
+/**
* struct iwl_self_init_dram - dram data used by self init process
* @fw: lmac and umac dram data
* @fw_cnt: total number of items in array
@@ -706,10 +716,17 @@ struct iwl_self_init_dram {
* pointers were received via TLV. Uses enum &iwl_error_event_table_status
* @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
* @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
- * @num_blocks: number of blocks in fw_mon
- * @fw_mon: address of the buffers for firmware monitor
+ * @fw_mon_cfg: debug buffer allocation configuration
+ * @fw_mon_ini: DRAM buffer fragments per allocation id
+ * @fw_mon: DRAM buffer for firmware monitor
* @hw_error: equals true if hw error interrupt was received from the FW
* @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
+ * @active_regions: active regions
+ * @debug_info_tlv_list: list of debug info TLVs
+ * @time_point: array of debug time points
+ * @periodic_trig_list: periodic triggers list
+ * @domains_bitmap: bitmap of active domains other than
+ * &IWL_FW_INI_DOMAIN_ALWAYS_ON
*/
struct iwl_trans_debug {
u8 n_dest_reg;
@@ -726,11 +743,21 @@ struct iwl_trans_debug {
enum iwl_ini_cfg_state internal_ini_cfg;
enum iwl_ini_cfg_state external_ini_cfg;
- int num_blocks;
- struct iwl_dram_data fw_mon[IWL_FW_INI_ALLOCATION_NUM];
+ struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
+ struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];
+
+ struct iwl_dram_data fw_mon;
bool hw_error;
enum iwl_fw_ini_buffer_location ini_dest;
+
+ struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
+ struct list_head debug_info_tlv_list;
+ struct iwl_dbg_tlv_time_point_data
+ time_point[IWL_FW_INI_TIME_POINT_NUM];
+ struct list_head periodic_trig_list;
+
+ u32 domains_bitmap;
};
/**
@@ -1222,6 +1249,11 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
iwl_op_mode_nic_error(trans->op_mode);
}
+static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
+{
+ return trans->state == IWL_TRANS_FW_ALIVE;
+}
+
static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
if (trans->ops->sync_nmi)
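struct iwl_fw_mon above replaces the old flat fw_mon array with a per-allocation-id fragment descriptor: a count plus an array of struct iwl_dram_data. A minimal sketch of how a consumer might total the DRAM actually captured for one allocation id; the struct shapes mirror this hunk with fields trimmed to what is used, and fw_mon_total_size() is a hypothetical helper, not a driver function.

	#include <stddef.h>

	struct iwl_dram_data {
		void *block;	/* trimmed: the driver also keeps a DMA address */
		size_t size;
	};

	struct iwl_fw_mon {
		unsigned int num_frags;
		struct iwl_dram_data *frags;
	};

	static size_t fw_mon_total_size(const struct iwl_fw_mon *mon)
	{
		size_t total = 0;
		unsigned int i;

		for (i = 0; i < mon->num_frags; i++)
			total += mon->frags[i].size;

		return total;
	}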
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 86c2c587e755..43ebb2149b63 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1939,6 +1939,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
if (iwl_mvm_check_rt_status(mvm, vif)) {
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
false, 0);
ret = 1;
@@ -1955,12 +1957,39 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
}
if (d0i3_first) {
- ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+ struct iwl_host_cmd cmd = {
+ .id = D0I3_END_CMD,
+ .flags = CMD_WANT_SKB,
+ };
+ int len;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret < 0) {
IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
ret);
goto err;
}
+ switch (mvm->cmd_ver.d0i3_resp) {
+ case 0:
+ break;
+ case 1:
+ len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ if (len != sizeof(u32)) {
+ IWL_ERR(mvm,
+ "Error with D0I3_END_CMD response size (%d)\n",
+ len);
+ goto err;
+ }
+ if (IWL_D0I3_RESET_REQUIRE &
+ le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) {
+ iwl_write32(mvm->trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_FORCE_NMI);
+ iwl_free_resp(&cmd);
+ }
+ break;
+ default:
+ WARN_ON(1);
+ }
}
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index ad18c2f1a806..aa659162a7c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -148,7 +148,8 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
"FLUSHING all tids queues on sta_id = %d\n",
flush_arg);
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFF, 0) ? : count;
+ ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFFFF, 0)
+ ? : count;
mutex_unlock(&mvm->mutex);
return ret;
}
@@ -377,7 +378,7 @@ static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file,
pos = scnprintf(buf, bufsz,
"SAR geographic profile disabled\n");
} else {
- value = &mvm->geo_profiles[tbl_idx - 1].values[0];
+ value = &mvm->fwrt.geo_profiles[tbl_idx - 1].values[0];
pos += scnprintf(buf + pos, bufsz - pos,
"Use geographic profile %d\n", tbl_idx);
@@ -1174,7 +1175,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
int bin_len = count / 2;
int ret = -EINVAL;
size_t mpdu_cmd_hdr_size = (mvm->trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_rx_mpdu_desc) :
IWL_RX_DESC_SIZE_V1;
@@ -1375,6 +1376,9 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
if (count == 0)
return 0;
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER,
+ NULL);
+
iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf,
(count - 1), NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index d9eb2b286438..dd685f7eb410 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -514,6 +514,19 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
struct iwl_phy_cfg_cmd phy_cfg_cmd;
enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
+ if (iwl_mvm_has_unified_ucode(mvm) &&
+ !mvm->trans->cfg->tx_with_siso_diversity)
+ return 0;
+
+ if (mvm->trans->cfg->tx_with_siso_diversity) {
+ /*
+ * TODO: currently we don't set the antenna but let the NIC
+ * decide which antenna to use. This should come from the BIOS.
+ */
+ phy_cfg_cmd.phy_cfg =
+ cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED);
+ }
+
/* Set parameters */
phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
@@ -665,181 +678,14 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
}
#ifdef CONFIG_ACPI
-static inline int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
- union acpi_object *table,
- struct iwl_mvm_sar_profile *profile,
- bool enabled)
-{
- int i;
-
- profile->enabled = enabled;
-
- for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) {
- if ((table[i].type != ACPI_TYPE_INTEGER) ||
- (table[i].integer.value > U8_MAX))
- return -EINVAL;
-
- profile->table[i] = table[i].integer.value;
- }
-
- return 0;
-}
-
-static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
-{
- union acpi_object *wifi_pkg, *table, *data;
- bool enabled;
- int ret, tbl_rev;
-
- data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg)) {
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
- }
-
- if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
- tbl_rev != 0) {
- ret = -EINVAL;
- goto out_free;
- }
-
- enabled = !!(wifi_pkg->package.elements[1].integer.value);
-
- /* position of the actual table */
- table = &wifi_pkg->package.elements[2];
-
- /* The profile from WRDS is officially profile 1, but goes
- * into sar_profiles[0] (because we don't have a profile 0).
- */
- ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
- enabled);
-out_free:
- kfree(data);
- return ret;
-}
-
-static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
-{
- union acpi_object *wifi_pkg, *data;
- bool enabled;
- int i, n_profiles, ret, tbl_rev;
-
- data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg)) {
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
- }
-
- if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
- (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) ||
- tbl_rev != 0) {
- ret = -EINVAL;
- goto out_free;
- }
-
- enabled = !!(wifi_pkg->package.elements[1].integer.value);
- n_profiles = wifi_pkg->package.elements[2].integer.value;
-
- /*
- * Check the validity of n_profiles. The EWRD profiles start
- * from index 1, so the maximum value allowed here is
- * ACPI_SAR_PROFILES_NUM - 1.
- */
- if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
- ret = -EINVAL;
- goto out_free;
- }
-
- for (i = 0; i < n_profiles; i++) {
- /* the tables start at element 3 */
- int pos = 3;
-
- /* The EWRD profiles officially go from 2 to 4, but we
- * save them in sar_profiles[1-3] (because we don't
- * have profile 0). So in the array we start from 1.
- */
- ret = iwl_mvm_sar_set_profile(mvm,
- &wifi_pkg->package.elements[pos],
- &mvm->sar_profiles[i + 1],
- enabled);
- if (ret < 0)
- break;
-
- /* go to the next table */
- pos += ACPI_SAR_TABLE_SIZE;
- }
-
-out_free:
- kfree(data);
- return ret;
-}
-
-static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
-{
- union acpi_object *wifi_pkg, *data;
- int i, j, ret, tbl_rev;
- int idx = 1;
-
- data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg)) {
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
- }
-
- if (tbl_rev != 0) {
- ret = -EINVAL;
- goto out_free;
- }
-
- mvm->geo_rev = tbl_rev;
- for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
- for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
- union acpi_object *entry;
-
- entry = &wifi_pkg->package.elements[idx++];
- if ((entry->type != ACPI_TYPE_INTEGER) ||
- (entry->integer.value > U8_MAX)) {
- ret = -EINVAL;
- goto out_free;
- }
-
- mvm->geo_profiles[i].values[j] = entry->integer.value;
- }
- }
- ret = 0;
-out_free:
- kfree(data);
- return ret;
-}
-
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
union {
struct iwl_dev_tx_power_cmd v5;
struct iwl_dev_tx_power_cmd_v4 v4;
} cmd;
- int i, j, idx;
- int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
- int len;
- BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
- BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
- ACPI_SAR_TABLE_SIZE);
+ u16 len = 0;
cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);
@@ -848,174 +694,76 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
len = sizeof(cmd.v5);
else if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
- len = sizeof(cmd.v4);
+ len = sizeof(struct iwl_dev_tx_power_cmd_v4);
else
len = sizeof(cmd.v4.v3);
- for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
- struct iwl_mvm_sar_profile *prof;
-
- /* don't allow SAR to be disabled (profile 0 means disable) */
- if (profs[i] == 0)
- return -EPERM;
-
- /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
- if (profs[i] > ACPI_SAR_PROFILE_NUM)
- return -EINVAL;
-
- /* profiles go from 1 to 4, so decrement to access the array */
- prof = &mvm->sar_profiles[profs[i] - 1];
-
- /* if the profile is disabled, do nothing */
- if (!prof->enabled) {
- IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
- profs[i]);
- /* if one of the profiles is disabled, we fail all */
- return -ENOENT;
- }
-
- IWL_DEBUG_INFO(mvm,
- "SAR EWRD: chain %d profile index %d\n",
- i, profs[i]);
- IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
- for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
- idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
- cmd.v5.v3.per_chain_restriction[i][j] =
- cpu_to_le16(prof->table[idx]);
- IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
- j, prof->table[idx]);
- }
- }
+ if (iwl_sar_select_profile(&mvm->fwrt, cmd.v5.v3.per_chain_restriction,
+ prof_a, prof_b))
+ return -ENOENT;
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
-
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
-static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
-{
- /*
- * The GEO_TX_POWER_LIMIT command is not supported on earlier
- * firmware versions. Unfortunately, we don't have a TLV API
- * flag to rely on, so rely on the major version which is in
- * the first byte of ucode_ver. This was implemented
- * initially on version 38 and then backported to 17. It was
- * also backported to 29, but only for 7265D devices. The
- * intention was to have it in 36 as well, but not all 8000
- * family got this feature enabled. The 8000 family is the
- * only one using version 36, so skip this version entirely.
- */
- return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
- IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17 ||
- (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 &&
- ((mvm->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
- CSR_HW_REV_TYPE_7265D));
-}
-
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
- struct iwl_geo_tx_power_profiles_resp *resp;
- int ret;
+ union geo_tx_power_profiles_cmd geo_tx_cmd;
u16 len;
- void *data;
- struct iwl_geo_tx_power_profiles_cmd geo_cmd;
- struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1;
+ int ret;
struct iwl_host_cmd cmd;
- if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
- geo_cmd.ops =
+ if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
+ geo_tx_cmd.geo_cmd.ops =
cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
- len = sizeof(geo_cmd);
- data = &geo_cmd;
+ len = sizeof(geo_tx_cmd.geo_cmd);
} else {
- geo_cmd_v1.ops =
+ geo_tx_cmd.geo_cmd_v1.ops =
cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
- len = sizeof(geo_cmd_v1);
- data = &geo_cmd_v1;
+ len = sizeof(geo_tx_cmd.geo_cmd_v1);
}
+ if (!iwl_sar_geo_support(&mvm->fwrt))
+ return -EOPNOTSUPP;
+
cmd = (struct iwl_host_cmd){
.id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
.len = { len, },
.flags = CMD_WANT_SKB,
- .data = { data },
+ .data = { &geo_tx_cmd },
};
- if (!iwl_mvm_sar_geo_support(mvm))
- return -EOPNOTSUPP;
-
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
return ret;
}
-
- resp = (void *)cmd.resp_pkt->data;
- ret = le32_to_cpu(resp->profile_idx);
- if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
- ret = -EIO;
- IWL_WARN(mvm, "Invalid geographic profile idx (%d)\n", ret);
- }
-
+ ret = iwl_validate_sar_geo_profile(&mvm->fwrt, &cmd);
iwl_free_resp(&cmd);
return ret;
}
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
- struct iwl_geo_tx_power_profiles_cmd cmd = {
- .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
- };
- int ret, i, j;
u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+ union geo_tx_power_profiles_cmd cmd;
+ u16 len;
- if (!iwl_mvm_sar_geo_support(mvm))
- return 0;
-
- ret = iwl_mvm_sar_get_wgds_table(mvm);
- if (ret < 0) {
- IWL_DEBUG_RADIO(mvm,
- "Geo SAR BIOS table invalid or unavailable. (%d)\n",
- ret);
- /* we don't fail if the table is not available */
- return 0;
- }
-
- IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
-
- BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
- ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE);
-
- BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
-
- for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
- struct iwl_per_chain_offset *chain =
- (struct iwl_per_chain_offset *)&cmd.table[i];
-
- for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
- u8 *value;
+ cmd.geo_cmd.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
- value = &mvm->geo_profiles[i].values[j *
- ACPI_GEO_PER_CHAIN_SIZE];
- chain[j].max_tx_power = cpu_to_le16(value[0]);
- chain[j].chain_a = value[1];
- chain[j].chain_b = value[2];
- IWL_DEBUG_RADIO(mvm,
- "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
- i, j, value[1], value[2], value[0]);
- }
- }
+ iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table);
- cmd.table_revision = cpu_to_le32(mvm->geo_rev);
+ cmd.geo_cmd.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
- return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0,
- sizeof(struct iwl_geo_tx_power_profiles_cmd_v1),
- &cmd);
+ if (!fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
+ len = sizeof(struct iwl_geo_tx_power_profiles_cmd_v1);
+ } else {
+ len = sizeof(cmd.geo_cmd);
}
- return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, len, &cmd);
}
static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
@@ -1024,7 +772,7 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
int i, j, ret, tbl_rev;
int idx = 2;
- mvm->ppag_table.enabled = cpu_to_le32(0);
+ mvm->fwrt.ppag_table.enabled = cpu_to_le32(0);
data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
@@ -1049,8 +797,8 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
goto out_free;
}
- mvm->ppag_table.enabled = cpu_to_le32(enabled->integer.value);
- if (!mvm->ppag_table.enabled) {
+ mvm->fwrt.ppag_table.enabled = cpu_to_le32(enabled->integer.value);
+ if (!mvm->fwrt.ppag_table.enabled) {
ret = 0;
goto out_free;
}
@@ -1070,11 +818,11 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
(j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) ||
(j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) ||
(j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) {
- mvm->ppag_table.enabled = cpu_to_le32(0);
+ mvm->fwrt.ppag_table.enabled = cpu_to_le32(0);
ret = -EINVAL;
goto out_free;
}
- mvm->ppag_table.gain[i][j] = ent->integer.value;
+ mvm->fwrt.ppag_table.gain[i][j] = ent->integer.value;
}
}
ret = 0;
@@ -1095,20 +843,20 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
IWL_DEBUG_RADIO(mvm, "PPAG is %s\n",
- mvm->ppag_table.enabled ? "enabled" : "disabled");
+ mvm->fwrt.ppag_table.enabled ? "enabled" : "disabled");
for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
IWL_DEBUG_RADIO(mvm,
"PPAG table: chain[%d] band[%d]: gain = %d\n",
- i, j, mvm->ppag_table.gain[i][j]);
+ i, j, mvm->fwrt.ppag_table.gain[i][j]);
}
}
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
- 0, sizeof(mvm->ppag_table),
- &mvm->ppag_table);
+ 0, sizeof(mvm->fwrt.ppag_table),
+ &mvm->fwrt.ppag_table);
if (ret < 0)
IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
ret);
@@ -1131,17 +879,14 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
}
#else /* CONFIG_ACPI */
-static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
-{
- return -ENOENT;
-}
-static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
+ int prof_a, int prof_b)
{
return -ENOENT;
}
-static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
+inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
return -ENOENT;
}
@@ -1151,17 +896,6 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
return 0;
}
-int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a,
- int prof_b)
-{
- return -ENOENT;
-}
-
-int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
-{
- return -ENOENT;
-}
-
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
return -ENOENT;
@@ -1169,7 +903,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
- return -ENOENT;
+ return 0;
}
#endif /* CONFIG_ACPI */
@@ -1228,7 +962,7 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
int ret;
- ret = iwl_mvm_sar_get_wrds_table(mvm);
+ ret = iwl_sar_get_wrds_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
@@ -1240,16 +974,14 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
return 1;
}
- ret = iwl_mvm_sar_get_ewrd_table(mvm);
+ ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
/* if EWRD is not available, we can still use WRDS, so don't fail */
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
ret);
- /* choose profile 1 (WRDS) as default for both chains */
ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
-
/*
* If we don't have profile 0 from BIOS, just skip it. This
* means that SAR Geo will not be enabled either, even if we
@@ -1344,12 +1076,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_send_phy_db_data(mvm->phy_db);
if (ret)
goto error;
-
- ret = iwl_send_phy_cfg_cmd(mvm);
- if (ret)
- goto error;
}
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret)
+ goto error;
+
ret = iwl_mvm_send_bt_init_conf(mvm);
if (ret)
goto error;
@@ -1480,7 +1212,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_sar_init(mvm);
if (ret == 0) {
ret = iwl_mvm_sar_geo_init(mvm);
- } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) {
+ } else if (ret > 0 && !iwl_sar_get_wgds_table(&mvm->fwrt)) {
/*
* If basic SAR is not available, we check for WGDS,
* which should *not* be available either. If it is
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 9c417dd06291..b78992e341d5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -855,11 +855,10 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
struct ieee80211_vif *vif)
{
u8 rate;
-
- if (info->band == NL80211_BAND_5GHZ || vif->p2p)
- rate = IWL_FIRST_OFDM_RATE;
- else
+ if (info->band == NL80211_BAND_2GHZ && !vif->p2p)
rate = IWL_FIRST_CCK_RATE;
+ else
+ rate = IWL_FIRST_OFDM_RATE;
return rate;
}
@@ -1404,6 +1403,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
u32 rx_missed_bcon, rx_missed_bcon_since_rx;
struct ieee80211_vif *vif;
u32 id = le32_to_cpu(mb->mac_id);
+ union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
IWL_DEBUG_INFO(mvm,
"missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
@@ -1432,7 +1432,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
ieee80211_beacon_loss(vif);
iwl_dbg_tlv_time_point(&mvm->fwrt,
- IWL_FW_INI_TIME_POINT_MISSED_BEACONS, NULL);
+ IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MISSED_BEACONS);
@@ -1609,3 +1609,26 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
out_unlock:
rcu_read_unlock();
}
+
+void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_missed_vap_notif *mb = (void *)pkt->data;
+ struct ieee80211_vif *vif;
+ u32 id = le32_to_cpu(mb->mac_id);
+
+ IWL_DEBUG_INFO(mvm,
+ "missed_vap notify mac_id=%u, num_beacon_intervals_elapsed=%u, profile_periodicity=%u\n",
+ le32_to_cpu(mb->mac_id),
+ mb->num_beacon_intervals_elapsed,
+ mb->profile_periodicity);
+
+ rcu_read_lock();
+
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ if (vif)
+ iwl_mvm_connection_loss(mvm, vif, "missed vap beacon");
+
+ rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index d31f96c3f925..473d56552e26 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -339,14 +339,14 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
return ret;
}
-const static u8 he_if_types_ext_capa_sta[] = {
+static const u8 he_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
-const static struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
+static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
{
.iftype = NL80211_IFTYPE_STATION,
.extended_capabilities = he_if_types_ext_capa_sta,
@@ -2280,7 +2280,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
- &mvm->status)) {
+ &mvm->status) &&
+ !fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
/*
* If we're restarting then the firmware will
* obviously have lost synchronisation with
@@ -2294,6 +2296,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*
* Set a large maximum delay to allow for more
* than a single interface.
+ *
+ * For new firmware versions, rely on the
+ * firmware. This is relevant for DCM scenarios
+ * only anyway.
*/
u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
iwl_mvm_protect_session(mvm, vif, dur, dur,
@@ -2384,8 +2390,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/*
* We received a beacon from the associated AP so
* remove the session protection.
+ * A firmware with the new API will remove it automatically.
*/
- iwl_mvm_stop_session_protection(mvm, vif);
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_sf_update(mvm, vif, false);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
@@ -3255,8 +3264,22 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
duration = req_duration;
mutex_lock(&mvm->mutex);
- /* Try really hard to protect the session and hear a beacon */
- iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
+ /* Try really hard to protect the session and hear a beacon.
+ * The new session protection command allows us to protect the
+ * session for a much longer time since the firmware will internally
+ * create two events: a 300TU one with a very high priority that
+ * won't be fragmented which should be enough for 99% of the cases,
+ * and another one (which we configure here to be 900TU long) which
+ * will have a slightly lower priority, but more importantly, can be
+ * fragmented so that it'll allow other activities to run.
+ */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ iwl_mvm_schedule_session_protection(mvm, vif, 900,
+ min_duration);
+ else
+ iwl_mvm_protect_session(mvm, vif, duration,
+ min_duration, 500, false);
mutex_unlock(&mvm->mutex);
}
@@ -3613,8 +3636,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
/* Set the channel info data */
iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value,
- (channel->band == NL80211_BAND_2GHZ) ?
- PHY_BAND_24 : PHY_BAND_5,
+ iwl_mvm_phy_band_from_nl80211(channel->band),
PHY_VHT_CHANNEL_MODE20,
0);
@@ -3848,7 +3870,7 @@ static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(mvm, "enter\n");
mutex_lock(&mvm->mutex);
- iwl_mvm_stop_roc(mvm);
+ iwl_mvm_stop_roc(mvm, vif);
mutex_unlock(&mvm->mutex);
IWL_DEBUG_MAC80211(mvm, "leave\n");
@@ -4622,7 +4644,7 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
continue;
if (drop)
- iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0);
+ iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF, 0);
else
iwl_mvm_wait_sta_queues_empty(mvm,
iwl_mvm_sta_from_mac80211(sta));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 5ca50f39a023..3ec8de00f3aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -188,6 +188,11 @@ enum iwl_power_scheme {
IWL_POWER_SCHEME_LP
};
+union geo_tx_power_profiles_cmd {
+ struct iwl_geo_tx_power_profiles_cmd geo_cmd;
+ struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1;
+};
+
#define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -774,14 +779,6 @@ enum iwl_mvm_queue_status {
#define IWL_MVM_NUM_CIPHERS 10
-struct iwl_mvm_sar_profile {
- bool enabled;
- u8 table[ACPI_SAR_TABLE_SIZE];
-};
-
-struct iwl_mvm_geo_profile {
- u8 values[ACPI_GEO_TABLE_SIZE];
-};
struct iwl_mvm_txq {
struct list_head list;
@@ -1122,6 +1119,10 @@ struct iwl_mvm {
int responses[IWL_MVM_TOF_MAX_APS];
} ftm_initiator;
+ struct {
+ u8 d0i3_resp;
+ } cmd_ver;
+
struct ieee80211_vif *nan_vif;
#define IWL_MAX_BAID 32
struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
@@ -1140,14 +1141,6 @@ struct iwl_mvm {
/* sniffer data to include in radiotap */
__le16 cur_aid;
u8 cur_bssid[ETH_ALEN];
-
-#ifdef CONFIG_ACPI
- struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
- struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
- u32 geo_rev;
- struct iwl_ppag_table_cmd ppag_table;
- u32 ppag_rev;
-#endif
};
/* Extract MVM priv from op_mode and _hw */
@@ -1405,12 +1398,19 @@ static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER);
}
+
static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG);
}
+static inline bool iwl_mvm_is_band_in_rx_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BAND_IN_RX_DATA);
+}
+
static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
@@ -1682,6 +1682,8 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
/* Bindings */
@@ -2077,6 +2079,19 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
struct dentry *dir);
#endif
+static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return PHY_BAND_24;
+ case NL80211_BAND_5GHZ:
+ return PHY_BAND_5;
+ default:
+ WARN_ONCE(1, "Unsupported band (%u)\n", band);
+ return PHY_BAND_5;
+ }
+}
+
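Any other band value (NL80211_BAND_60GHZ, say) trips the WARN_ONCE and falls back to PHY_BAND_5, so callers never need to check for an error; the call sites in this patch feed the result straight into iwl_mvm_set_chan_info() and the scan commands.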
/* Channel info utils */
static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm)
{
@@ -2125,11 +2140,12 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
struct iwl_fw_channel_info *ci,
struct cfg80211_chan_def *chandef)
{
+ enum nl80211_band band = chandef->chan->band;
+
iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value,
- (chandef->chan->band == NL80211_BAND_2GHZ ?
- PHY_BAND_24 : PHY_BAND_5),
- iwl_mvm_get_channel_width(chandef),
- iwl_mvm_get_ctrl_pos(chandef));
+ iwl_mvm_phy_band_from_nl80211(band),
+ iwl_mvm_get_channel_width(chandef),
+ iwl_mvm_get_ctrl_pos(chandef));
}
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 3acbd5b7ab4b..3b0637311e3c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -263,6 +263,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
RX_HANDLER_SYNC),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
+ iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC),
RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
RX_HANDLER_ASYNC_LOCKED),
@@ -432,6 +434,8 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
*/
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
+ HCMD_NAME(SESSION_PROTECTION_CMD),
+ HCMD_NAME(SESSION_PROTECTION_NOTIF),
HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
};
@@ -608,6 +612,27 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.d3_debug_enable = iwl_mvm_d3_debug_enable,
};
+static u8 iwl_mvm_lookup_notif_ver(struct iwl_mvm *mvm, u8 grp, u8 cmd, u8 def)
+{
+ const struct iwl_fw_cmd_version *entry;
+ unsigned int i;
+
+ if (!mvm->fw->ucode_capa.cmd_versions ||
+ !mvm->fw->ucode_capa.n_cmd_versions)
+ return def;
+
+ entry = mvm->fw->ucode_capa.cmd_versions;
+ for (i = 0; i < mvm->fw->ucode_capa.n_cmd_versions; i++, entry++) {
+ if (entry->group == grp && entry->cmd == cmd) {
+ if (entry->notif_ver == IWL_FW_CMD_VER_UNKNOWN)
+ return def;
+ return entry->notif_ver;
+ }
+ }
+
+ return def;
+}
+
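The lookup walks the TLV-provided array of struct iwl_fw_cmd_version entries. A worked illustration (values invented, not from the patch):

	/*
	 * With a firmware that advertised
	 *   { .group = LEGACY_GROUP, .cmd = D0I3_END_CMD, .notif_ver = 1 }
	 * this returns 1; with no matching entry, or an entry whose
	 * notif_ver is IWL_FW_CMD_VER_UNKNOWN, it returns the default (0).
	 */
	u8 ver = iwl_mvm_lookup_notif_ver(mvm, LEGACY_GROUP, D0I3_END_CMD, 0);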
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -667,7 +692,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
op_mode->ops = &iwl_mvm_ops_mq;
trans->rx_mpdu_cmd_hdr_size =
(trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_rx_mpdu_desc) :
IWL_RX_DESC_SIZE_V1;
} else {
@@ -722,6 +747,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
+ mvm->cmd_ver.d0i3_resp =
+ iwl_mvm_lookup_notif_ver(mvm, LEGACY_GROUP, D0I3_END_CMD, 0);
+ /* we only support version 1 */
+ if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
+ goto out_free;
+
/*
* Populate the state variables that the transport layer needs
* to know about.
@@ -730,7 +761,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
rb_size_default = IWL_AMSDU_2K;
else
rb_size_default = IWL_AMSDU_4K;
@@ -756,7 +787,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans->wide_cmd_header = true;
trans_cfg.bc_table_dword =
- mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560;
+ mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210;
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 8f50e2b121bd..e2cf9e015ef8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -341,16 +341,24 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
lq_sta = &mvmsta->lq_sta.rs_fw;
if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
+ char pretty_rate[100];
+
lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
- IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n",
- lq_sta->last_rate_n_flags);
+ rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
+ lq_sta->last_rate_n_flags);
+ IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate);
}
if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvmsta->orig_amsdu_len) {
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
- if (WARN_ON(sta->max_amsdu_len < size))
+ /*
+ * When debugfs overrides the A-MSDU length, sta->max_amsdu_len can be
+ * smaller than size, so also check orig_amsdu_len, which holds the
+ * original value from before debugfs changed it.
+ */
+ if (WARN_ON(sta->max_amsdu_len < size &&
+ mvmsta->orig_amsdu_len < size))
goto out;
mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
@@ -378,7 +386,7 @@ out:
rcu_read_unlock();
}
-static u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
+u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 42d525e46e80..1a990ed9c3ca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1533,6 +1533,8 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int i;
+ sta->max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
+
/*
* In case TLC offload is not active amsdu_enabled is either 0xFFFF
* or 0, since there is no per-TID alg.
@@ -3683,7 +3685,6 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta)
IWL_DEBUG_RATE(mvm, "leave\n");
}
-#ifdef CONFIG_MAC80211_DEBUGFS
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
{
@@ -3739,14 +3740,15 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
}
return scnprintf(buf, bufsz,
- "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
- type, rs_pretty_ant(ant), bw, mcs, nss,
+ "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
+ rate, type, rs_pretty_ant(ant), bw, mcs, nss,
(rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
(rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
(rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
(rate & RATE_MCS_BF_MSK) ? "BF " : "");
}
+#ifdef CONFIG_MAC80211_DEBUGFS
/**
 * Program the device to use a fixed rate for frame transmission.
 * This is for debugging/testing only.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 428642e66658..32104c9f8f5e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -445,10 +445,6 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm);
#endif
-#ifdef CONFIG_MAC80211_DEBUGFS
-void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
-#endif
-
void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band, bool update);
@@ -456,4 +452,6 @@ int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable);
void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+
+u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta);
#endif /* __rs__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 0ad8ed23a455..5ee33c8ae9d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -60,6 +60,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
+#include <asm/unaligned.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "iwl-trans.h"
@@ -357,7 +358,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
len = le16_to_cpu(rx_res->byte_count);
- rx_pkt_status = le32_to_cpup((__le32 *)
+ rx_pkt_status = get_unaligned_le32((__le32 *)
(pkt->data + sizeof(*rx_res) + len));
 /* Don't use dev_alloc_skb(), we'll have enough headroom once
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 77b03b757193..75a7af5ad7b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -1542,6 +1542,19 @@ static void iwl_mvm_decode_lsig(struct sk_buff *skb,
}
}
+static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band)
+{
+ switch (phy_band) {
+ case PHY_BAND_24:
+ return NL80211_BAND_2GHZ;
+ case PHY_BAND_5:
+ return NL80211_BAND_5GHZ;
+ default:
+ WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
+ return NL80211_BAND_5GHZ;
+ }
+}
+
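This helper is the inverse of iwl_mvm_phy_band_from_nl80211() from mvm.h, and the two should round-trip for the bands the firmware reports. A sanity-check sketch (illustration only, not in the patch):

	WARN_ON(iwl_mvm_nl80211_band_from_rx_msdu(PHY_BAND_24) !=
		NL80211_BAND_2GHZ);
	WARN_ON(iwl_mvm_phy_band_from_nl80211(NL80211_BAND_5GHZ) !=
		PHY_BAND_5);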
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@@ -1565,7 +1578,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
channel = desc->v3.channel;
gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
@@ -1667,7 +1680,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
u64 tsf_on_air_rise;
if (mvm->trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560)
+ IWL_DEVICE_FAMILY_AX210)
tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
else
tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
@@ -1678,8 +1691,14 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
rx_status->device_timestamp = gp2_on_air_rise;
- rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
- NL80211_BAND_2GHZ;
+ if (iwl_mvm_is_band_in_rx_supported(mvm)) {
+ u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx);
+
+ rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band);
+ } else {
+ rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
+ }
rx_status->freq = ieee80211_channel_to_frequency(channel,
rx_status->band);
iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index fcafa22ec6ce..60edba558c99 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -79,9 +79,6 @@
#define IWL_SCAN_NUM_OF_FRAGS 3
#define IWL_SCAN_LAST_2_4_CHN 14
-#define IWL_SCAN_BAND_5_2 0
-#define IWL_SCAN_BAND_2_4 1
-
/* adaptive dwell max budget time [TU] for full scan */
#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
/* adaptive dwell max budget time [TU] for directed scan */
@@ -92,6 +89,10 @@
#define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2
/* adaptive dwell default APs number in social channels (1, 6, 11) */
#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
+/* number of scan channels */
+#define IWL_SCAN_NUM_CHANNELS 112
+/* adaptive dwell default number of APs override */
+#define IWL_SCAN_ADWELL_DEFAULT_N_APS_OVERRIDE 10
struct iwl_mvm_scan_timing_params {
u32 suspend_time;
@@ -196,14 +197,6 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
return cpu_to_le16(rx_chain);
}
-static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band)
-{
- if (band == NL80211_BAND_2GHZ)
- return cpu_to_le32(PHY_BAND_24);
- else
- return cpu_to_le32(PHY_BAND_5);
-}
-
static inline __le32
iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
bool no_cck)
@@ -550,6 +543,7 @@ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
{
int i, j;
int index;
+ u32 tmp_bitmap = 0;
/*
* copy SSIDs from match list.
@@ -569,7 +563,6 @@ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
}
/* add SSIDs from scan SSID list */
- *ssid_bitmap = 0;
for (j = params->n_ssids - 1;
j >= 0 && i < PROBE_OPTION_MAX;
i++, j--) {
@@ -581,11 +574,13 @@ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
ssids[i].len = params->ssids[j].ssid_len;
memcpy(ssids[i].ssid, params->ssids[j].ssid,
ssids[i].len);
- *ssid_bitmap |= BIT(i);
+ tmp_bitmap |= BIT(i);
} else {
- *ssid_bitmap |= BIT(index);
+ tmp_bitmap |= BIT(index);
}
}
+ if (ssid_bitmap)
+ *ssid_bitmap = tmp_bitmap;
}
static int
@@ -981,10 +976,7 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->fw->ucode_capa.n_scan_channels);
u32 ssid_bitmap = 0;
int i;
-
- lockdep_assert_held(&mvm->mutex);
-
- memset(cmd, 0, ksize(cmd));
+ u8 band;
if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
return -EINVAL;
@@ -1000,7 +992,8 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params,
vif));
- cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
+ band = iwl_mvm_phy_band_from_nl80211(params->channels[0]->band);
+ cmd->flags = cpu_to_le32(band);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
@@ -1414,21 +1407,176 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
+static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params)
+{
+ return iwl_mvm_is_regular_scan(params) ?
+ IWL_SCAN_PRIORITY_EXT_6 :
+ IWL_SCAN_PRIORITY_EXT_2;
+}
+
+static void
+iwl_mvm_scan_umac_dwell_v10(struct iwl_mvm *mvm,
+ struct iwl_scan_general_params_v10 *general_params,
+ struct iwl_mvm_scan_params *params)
+{
+ struct iwl_mvm_scan_timing_params *timing, *hb_timing;
+ u8 active_dwell, passive_dwell;
+
+ timing = &scan_timing[params->type];
+ active_dwell = params->measurement_dwell ?
+ params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE;
+ passive_dwell = params->measurement_dwell ?
+ params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE;
+
+ general_params->adwell_default_social_chn =
+ IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
+ general_params->adwell_default_2g = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
+ general_params->adwell_default_5g = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
+
+ /* if custom max budget was configured with debugfs */
+ if (IWL_MVM_ADWELL_MAX_BUDGET)
+ general_params->adwell_max_budget =
+ cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+ else if (params->ssids && params->ssids[0].ssid_len)
+ general_params->adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ else
+ general_params->adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
+
+ general_params->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ general_params->max_out_of_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ general_params->suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+
+ hb_timing = &scan_timing[params->hb_type];
+
+ general_params->max_out_of_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->max_out_time);
+ general_params->suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->suspend_time);
+
+ general_params->active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
+ general_params->passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
+ general_params->active_dwell[SCAN_HB_LMAC_IDX] = active_dwell;
+ general_params->passive_dwell[SCAN_HB_LMAC_IDX] = passive_dwell;
+}
+
+struct iwl_mvm_scan_channel_segment {
+ u8 start_idx;
+ u8 end_idx;
+ u8 first_channel_id;
+ u8 last_channel_id;
+ u8 channel_spacing_shift;
+ u8 band;
+};
+
+static const struct iwl_mvm_scan_channel_segment scan_channel_segments[] = {
+ {
+ .start_idx = 0,
+ .end_idx = 13,
+ .first_channel_id = 1,
+ .last_channel_id = 14,
+ .channel_spacing_shift = 0,
+ .band = PHY_BAND_24
+ },
+ {
+ .start_idx = 14,
+ .end_idx = 41,
+ .first_channel_id = 36,
+ .last_channel_id = 144,
+ .channel_spacing_shift = 2,
+ .band = PHY_BAND_5
+ },
+ {
+ .start_idx = 42,
+ .end_idx = 50,
+ .first_channel_id = 149,
+ .last_channel_id = 181,
+ .channel_spacing_shift = 2,
+ .band = PHY_BAND_5
+ },
+};
+
+static int iwl_mvm_scan_ch_and_band_to_idx(u8 channel_id, u8 band)
+{
+ int i, index;
+
+ if (!channel_id)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(scan_channel_segments); i++) {
+ const struct iwl_mvm_scan_channel_segment *ch_segment =
+ &scan_channel_segments[i];
+ u32 ch_offset;
+
+ if (ch_segment->band != band ||
+ ch_segment->first_channel_id > channel_id ||
+ ch_segment->last_channel_id < channel_id)
+ continue;
+
+ ch_offset = (channel_id - ch_segment->first_channel_id) >>
+ ch_segment->channel_spacing_shift;
+
+ index = scan_channel_segments[i].start_idx + ch_offset;
+ if (index < IWL_SCAN_NUM_CHANNELS)
+ return index;
+
+ break;
+ }
+
+ return -EINVAL;
+}
+
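Worked examples of the segment math above (my own illustration): the index is the segment's start_idx plus the channel's offset within the segment, shifted by the segment's spacing:

	iwl_mvm_scan_ch_and_band_to_idx(11, PHY_BAND_24);  /* (11 - 1) >> 0 + 0 = 10 */
	iwl_mvm_scan_ch_and_band_to_idx(40, PHY_BAND_5);   /* (40 - 36) >> 2 + 14 = 15 */
	iwl_mvm_scan_ch_and_band_to_idx(149, PHY_BAND_5);  /* (149 - 149) >> 2 + 42 = 42 */
	iwl_mvm_scan_ch_and_band_to_idx(182, PHY_BAND_5);  /* no segment matches: -EINVAL */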
+static void iwl_mvm_scan_ch_add_n_aps_override(enum nl80211_iftype vif_type,
+ u8 ch_id, u8 band, u8 *ch_bitmap,
+ size_t bitmap_n_entries)
+{
+ int i;
+ static const u8 p2p_go_friendly_chs[] = {
+ 36, 40, 44, 48, 149, 153, 157, 161, 165,
+ };
+
+ if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
+ if (p2p_go_friendly_chs[i] == ch_id) {
+ int ch_idx, bitmap_idx;
+
+ ch_idx = iwl_mvm_scan_ch_and_band_to_idx(ch_id, band);
+ if (ch_idx < 0)
+ return;
+
+ bitmap_idx = ch_idx / 8;
+ if (bitmap_idx >= bitmap_n_entries)
+ return;
+
+ ch_idx = ch_idx % 8;
+ ch_bitmap[bitmap_idx] |= BIT(ch_idx);
+
+ return;
+ }
+ }
+}
+
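Continuing the example: a P2P_DEVICE scan that includes channel 40 maps it to index 15, so the override bit lands in ch_bitmap[1] as BIT(7), since 15 / 8 = 1 and 15 % 8 = 7. Channels outside p2p_go_friendly_chs, and any iftype other than P2P_DEVICE, leave the bitmap untouched.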
static void
iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
struct ieee80211_channel **channels,
- int n_channels, u32 ssid_bitmap,
+ int n_channels, u32 flags,
struct iwl_scan_channel_cfg_umac *channel_cfg)
{
int i;
for (i = 0; i < n_channels; i++) {
- channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
+ channel_cfg[i].flags = cpu_to_le32(flags);
channel_cfg[i].v1.channel_num = channels[i]->hw_value;
if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
+ enum nl80211_band band = channels[i]->band;
+
channel_cfg[i].v2.band =
- channels[i]->hw_value <= IWL_SCAN_LAST_2_4_CHN ?
- IWL_SCAN_BAND_2_4 : IWL_SCAN_BAND_5_2;
+ iwl_mvm_phy_band_from_nl80211(band);
channel_cfg[i].v2.iter_count = 1;
channel_cfg[i].v2.iter_interval = 0;
} else {
@@ -1438,6 +1586,92 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
}
}
+static void
+iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm,
+ struct ieee80211_channel **channels,
+ struct iwl_scan_channel_params_v4 *cp,
+ int n_channels, u32 flags,
+ enum nl80211_iftype vif_type)
+{
+ u8 *bitmap = cp->adwell_ch_override_bitmap;
+ size_t bitmap_n_entries = ARRAY_SIZE(cp->adwell_ch_override_bitmap);
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ enum nl80211_band band = channels[i]->band;
+ struct iwl_scan_channel_cfg_umac *cfg =
+ &cp->channel_config[i];
+
+ cfg->flags = cpu_to_le32(flags);
+ cfg->v2.channel_num = channels[i]->hw_value;
+ cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band);
+ cfg->v2.iter_count = 1;
+ cfg->v2.iter_interval = 0;
+
+ iwl_mvm_scan_ch_add_n_aps_override(vif_type,
+ cfg->v2.channel_num,
+ cfg->v2.band, bitmap,
+ bitmap_n_entries);
+ }
+}
+
+static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ u8 flags = 0;
+
+ flags |= IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
+
+ if (iwl_mvm_scan_use_ebs(mvm, vif))
+ flags |= IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+
+ /* set fragmented ebs for fragmented scan on HB channels */
+ if (iwl_mvm_is_scan_fragmented(params->hb_type))
+ flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
+
+ return flags;
+}
+
+static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ int type)
+{
+ u16 flags = 0;
+
+ if (params->n_ssids == 0)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
+
+ if (iwl_mvm_is_scan_fragmented(params->type))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1;
+
+ if (iwl_mvm_is_scan_fragmented(params->hb_type))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2;
+
+ if (params->pass_all)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
+ else
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH;
+
+ if (!iwl_mvm_is_regular_scan(params))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC;
+
+ if (params->measurement_dwell ||
+ mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
+
+ if (IWL_MVM_ADWELL_ENABLE)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
+
+ if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE;
+
+ return flags;
+}
+
static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
@@ -1481,8 +1715,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
- if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE &&
- vif->type != NL80211_IFTYPE_P2P_DEVICE)
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL;
/*
@@ -1517,9 +1750,42 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
return flags;
}
+static int
+iwl_mvm_fill_scan_sched_params(struct iwl_mvm_scan_params *params,
+ struct iwl_scan_umac_schedule *schedule,
+ __le16 *delay)
+{
+ int i;
+
+ if (WARN_ON(!params->n_scan_plans ||
+ params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+ return -EINVAL;
+
+ for (i = 0; i < params->n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *scan_plan =
+ &params->scan_plans[i];
+
+ schedule[i].iter_count = scan_plan->iterations;
+ schedule[i].interval =
+ cpu_to_le16(scan_plan->interval);
+ }
+
+ /*
+ * If the number of iterations of the last scan plan is set to
+ * zero, it should run infinitely. However, this is not always the case.
+ * For example, when a regular scan is requested, the driver sets one scan
+ * plan with one iteration.
+ */
+ if (!schedule[params->n_scan_plans - 1].iter_count)
+ schedule[params->n_scan_plans - 1].iter_count = 0xff;
+
+ *delay = cpu_to_le16(params->delay);
+
+ return 0;
+}
+
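To make the convention concrete: a regular scan is built as one plan with one iteration, so its iter_count stays 1. Only a scheduled scan whose last plan was requested with iter_count == 0 gets patched to 0xff, the value the driver uses as its effectively-infinite marker.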
static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params,
- int type)
+ int type, int uid)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_umac_chan_param *chan_param;
@@ -1530,7 +1796,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
(struct iwl_scan_req_umac_tail_v2 *)sec_part;
struct iwl_scan_req_umac_tail_v1 *tail_v1;
struct iwl_ssid_ie *direct_scan;
- int uid, i;
+ int ret = 0;
u32 ssid_bitmap = 0;
u8 channel_flags = 0;
u16 gen_flags;
@@ -1538,17 +1804,6 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
chan_param = iwl_mvm_get_scan_req_umac_channel(mvm);
- lockdep_assert_held(&mvm->mutex);
-
- if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
- return -EINVAL;
-
- uid = iwl_mvm_scan_uid_by_status(mvm, 0);
- if (uid < 0)
- return uid;
-
- memset(cmd, 0, ksize(cmd));
-
iwl_mvm_scan_umac_dwell(mvm, cmd, params);
mvm->scan_uid_status[uid] = type;
@@ -1591,25 +1846,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
chan_param->flags = channel_flags;
chan_param->count = params->n_channels;
- for (i = 0; i < params->n_scan_plans; i++) {
- struct cfg80211_sched_scan_plan *scan_plan =
- &params->scan_plans[i];
-
- tail_v2->schedule[i].iter_count = scan_plan->iterations;
- tail_v2->schedule[i].interval =
- cpu_to_le16(scan_plan->interval);
- }
-
- /*
- * If the number of iterations of the last scan plan is set to
- * zero, it should run infinitely. However, this is not always the case.
- * For example, when regular scan is requested the driver sets one scan
- * plan with one iteration.
- */
- if (!tail_v2->schedule[i - 1].iter_count)
- tail_v2->schedule[i - 1].iter_count = 0xff;
-
- tail_v2->delay = cpu_to_le16(params->delay);
+ ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule,
+ &tail_v2->delay);
+ if (ret)
+ return ret;
if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
tail_v2->preq = params->preq;
@@ -1627,6 +1867,128 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
+static void
+iwl_mvm_scan_umac_fill_general_p_v10(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_general_params_v10 *gp,
+ u16 gen_flags)
+{
+ struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
+
+ iwl_mvm_scan_umac_dwell_v10(mvm, gp, params);
+
+ gp->flags = cpu_to_le16(gen_flags);
+
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
+ gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
+ gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
+
+ gp->scan_start_mac_id = scan_vif->id;
+}
+
+static void
+iwl_mvm_scan_umac_fill_probe_p_v3(struct iwl_mvm_scan_params *params,
+ struct iwl_scan_probe_params_v3 *pp)
+{
+ pp->preq = params->preq;
+ pp->ssid_num = params->n_ssids;
+ iwl_scan_build_ssids(params, pp->direct_scan, NULL);
+}
+
+static void
+iwl_mvm_scan_umac_fill_ch_p_v3(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_channel_params_v3 *cp)
+{
+ cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
+ cp->count = params->n_channels;
+
+ iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
+ params->n_channels, 0,
+ cp->channel_config);
+}
+
+static void
+iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_channel_params_v4 *cp)
+{
+ cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
+ cp->count = params->n_channels;
+ cp->num_of_aps_override = IWL_SCAN_ADWELL_DEFAULT_N_APS_OVERRIDE;
+
+ iwl_mvm_umac_scan_cfg_channels_v4(mvm, params->channels, cp,
+ params->n_channels, 0, vif->type);
+}
+
+static int iwl_mvm_scan_umac_v11(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ struct iwl_scan_req_umac_v11 *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_params_v11 *scan_p = &cmd->scan_params;
+ int ret;
+ u16 gen_flags;
+
+ mvm->scan_uid_status[uid] = type;
+
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->uid = cpu_to_le32(uid);
+
+ gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+ iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
+ &scan_p->general_params,
+ gen_flags);
+
+ ret = iwl_mvm_fill_scan_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
+ if (ret)
+ return ret;
+
+ iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
+ iwl_mvm_scan_umac_fill_ch_p_v3(mvm, params, vif,
+ &scan_p->channel_params);
+
+ return 0;
+}
+
+static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ struct iwl_scan_req_umac_v12 *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_params_v12 *scan_p = &cmd->scan_params;
+ int ret;
+ u16 gen_flags;
+
+ mvm->scan_uid_status[uid] = type;
+
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->uid = cpu_to_le32(uid);
+
+ gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+ iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
+ &scan_p->general_params,
+ gen_flags);
+
+ ret = iwl_mvm_fill_scan_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
+ if (ret)
+ return ret;
+
+ iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
+ iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif,
+ &scan_p->channel_params);
+
+ return 0;
+}
+
static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
{
return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
@@ -1729,6 +2091,63 @@ static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
}
}
+struct iwl_scan_umac_handler {
+ u8 version;
+ int (*handler)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type, int uid);
+};
+
+#define IWL_SCAN_UMAC_HANDLER(_ver) { \
+ .version = _ver, \
+ .handler = iwl_mvm_scan_umac_v##_ver, \
+}
+
+static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
+ /* set the newest version first to shorten the list traverse time */
+ IWL_SCAN_UMAC_HANDLER(12),
+ IWL_SCAN_UMAC_HANDLER(11),
+};
+
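Extending the table is deliberately cheap. A hypothetical future version would only need its own iwl_mvm_scan_umac_v13() implementation plus one entry, kept newest-first:

	static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
		IWL_SCAN_UMAC_HANDLER(13),	/* hypothetical */
		IWL_SCAN_UMAC_HANDLER(12),
		IWL_SCAN_UMAC_HANDLER(11),
	};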
+static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_host_cmd *hcmd,
+ struct iwl_mvm_scan_params *params,
+ int type)
+{
+ int uid, i;
+ u8 scan_ver;
+
+ lockdep_assert_held(&mvm->mutex);
+ memset(mvm->scan_cmd, 0, ksize(mvm->scan_cmd));
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ hcmd->id = SCAN_OFFLOAD_REQUEST_CMD;
+
+ return iwl_mvm_scan_lmac(mvm, vif, params);
+ }
+
+ uid = iwl_mvm_scan_uid_by_status(mvm, 0);
+ if (uid < 0)
+ return uid;
+
+ hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
+
+ scan_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC);
+
+ for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
+ const struct iwl_scan_umac_handler *ver_handler =
+ &iwl_scan_umac_handlers[i];
+
+ if (ver_handler->version != scan_ver)
+ continue;
+
+ return ver_handler->handler(mvm, vif, params, type, uid);
+ }
+
+ return iwl_mvm_scan_umac(mvm, vif, params, type, uid);
+}
+
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@@ -1786,14 +2205,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
- if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
- hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
- ret = iwl_mvm_scan_umac(mvm, vif, &params,
- IWL_MVM_SCAN_REGULAR);
- } else {
- hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
- ret = iwl_mvm_scan_lmac(mvm, vif, &params);
- }
+ ret = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params,
+ IWL_MVM_SCAN_REGULAR);
if (ret)
return ret;
@@ -1891,13 +2304,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
- if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
- hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
- ret = iwl_mvm_scan_umac(mvm, vif, &params, type);
- } else {
- hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
- ret = iwl_mvm_scan_lmac(mvm, vif, &params);
- }
+ ret = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
if (ret)
return ret;
@@ -2048,10 +2455,30 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
1 * HZ);
}
+#define IWL_SCAN_REQ_UMAC_HANDLE_SIZE(_ver) { \
+ case (_ver): return sizeof(struct iwl_scan_req_umac_v##_ver); \
+}
+
+static int iwl_scan_req_umac_get_size(u8 scan_ver)
+{
+ switch (scan_ver) {
+ IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
+ IWL_SCAN_REQ_UMAC_HANDLE_SIZE(11);
+ }
+
+ return 0;
+}
+
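For reference, IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12) expands to

	{ case (12): return sizeof(struct iwl_scan_req_umac_v12); }

inside the switch, so a scan_ver the driver does not know falls through, returns 0, and iwl_mvm_scan_size() below drops back to the capability-based sizes.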
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
- int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
- int tail_size;
+ int base_size, tail_size;
+ u8 scan_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC);
+
+ base_size = iwl_scan_req_umac_get_size(scan_ver);
+ if (base_size)
+ return base_size;
+
if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_V8;
@@ -2059,6 +2486,8 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
else if (iwl_mvm_cdb_scan_api(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
+ else
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
if (iwl_mvm_is_scan_ext_chan_supported(mvm))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index a06bc63fb516..51b138673ddb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -734,6 +734,11 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
return;
}
+/*
+ * When the firmware supports the session protection API,
+ * this is not needed since it'll automatically remove the
+ * session protection after association + beacon reception.
+ */
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -757,6 +762,101 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}
+void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif;
+
+ rcu_read_lock();
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id),
+ true);
+
+ if (!vif)
+ goto out_unlock;
+
+ /* The vif is not a P2P_DEVICE, maintain its time_event_data */
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data =
+ &mvmvif->time_event_data;
+
+ if (!le32_to_cpu(notif->status)) {
+ iwl_mvm_te_check_disconnect(mvm, vif,
+ "Session protection failure");
+ iwl_mvm_te_clear_data(mvm, te_data);
+ }
+
+ if (le32_to_cpu(notif->start)) {
+ spin_lock_bh(&mvm->time_event_lock);
+ te_data->running = le32_to_cpu(notif->start);
+ te_data->end_jiffies =
+ TU_TO_EXP_TIME(te_data->duration);
+ spin_unlock_bh(&mvm->time_event_lock);
+ } else {
+ /*
+ * By now, we should have finished association
+ * and know the dtim period.
+ */
+ iwl_mvm_te_check_disconnect(mvm, vif,
+ "No beacon heard and the session protection is over already...");
+ iwl_mvm_te_clear_data(mvm, te_data);
+ }
+
+ goto out_unlock;
+ }
+
+ if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
+ /* End TE, notify mac80211 */
+ ieee80211_remain_on_channel_expired(mvm->hw);
+ set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+ iwl_mvm_roc_finished(mvm);
+ } else if (le32_to_cpu(notif->start)) {
+ set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ ieee80211_ready_on_channel(mvm->hw); /* Start TE */
+ }
+
+ out_unlock:
+ rcu_read_unlock();
+}
+
+static int
+iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_session_prot_cmd cmd = {
+ .id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (type) {
+ case IEEE80211_ROC_TYPE_NORMAL:
+ cmd.conf_id =
+ cpu_to_le32(SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV);
+ break;
+ case IEEE80211_ROC_TYPE_MGMT_TX:
+ cmd.conf_id =
+ cpu_to_le32(SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION);
+ break;
+ default:
+ WARN_ONCE(1, "Got an invalid ROC type\n");
+ return -EINVAL;
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+}
+
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int duration, enum ieee80211_roc_type type)
{
@@ -770,6 +870,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
}
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ return iwl_mvm_start_p2p_roc_session_protection(mvm, vif,
+ duration,
+ type);
+
time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
time_cmd.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
@@ -847,11 +953,44 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
__iwl_mvm_remove_time_event(mvm, te_data, &uid);
}
-void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
+static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif)
+{
+ struct iwl_mvm_session_prot_cmd cmd = {
+ .id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm,
+ "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
+}
+
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_time_event_data *te_data;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ iwl_mvm_cancel_session_protection(mvm, mvmvif);
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+
+ iwl_mvm_roc_finished(mvm);
+
+ return;
+ }
+
te_data = iwl_mvm_get_roc_te(mvm);
if (!te_data) {
IWL_WARN(mvm, "No remain on channel event\n");
@@ -916,3 +1055,51 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
+
+void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ struct iwl_mvm_session_prot_cmd cmd = {
+ .id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
+ .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->time_event_lock);
+ if (te_data->running &&
+ time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
+ IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
+ jiffies_to_msecs(te_data->end_jiffies - jiffies));
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ return;
+ }
+
+ iwl_mvm_te_clear_data(mvm, te_data);
+ te_data->duration = le32_to_cpu(cmd.duration_tu);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
+ le32_to_cpu(cmd.duration_tu));
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+ if (ret) {
+ IWL_ERR(mvm,
+ "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+ }
+}
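The min_duration check makes repeated calls cheap. For illustration (values invented): if a previous protection still has roughly 500 TU to run, a call with min_duration = 300 logs "We have enough time in the current TE" and returns; with min_duration = 800 it clears the stale time-event data and sends a fresh SESSION_PROTECTION_CMD.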
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
index 1dd3d01245ea..df6832b79666 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -178,12 +180,13 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/**
* iwl_mvm_stop_roc - stop remain on channel functionality
* @mvm: the mvm component
+ * @vif: the virtual interface for which the ROC is stopped
*
* This function can be used to cancel an ongoing ROC session.
* The function is async, it will instruct the FW to stop serving the ROC
* session, but will not wait for the actual stopping of the session.
*/
-void iwl_mvm_stop_roc(struct iwl_mvm *mvm);
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
/**
* iwl_mvm_remove_time_event - general function to clean up of time event
@@ -242,4 +245,20 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
return !!te_data->uid;
}
+/**
+ * iwl_mvm_schedule_session_protection - schedule a session protection
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the protection is issued
+ * @duration: the requested duration of the protection
+ * @min_duration: do nothing if an existing protection is at least
+ *    this long
+ */
+void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration);
+
+/**
+ * iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
+ * @mvm: the mvm component
+ * @rxb: the RX buffer that contains the notification
+ */
+void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
#endif /* __time_event_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index f0c539b37ea7..b5a16f00bada 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -8,6 +8,7 @@
* Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -482,26 +484,27 @@ static const struct iwl_tt_params iwl_mvm_default_tt_params = {
/* budget in mWatt */
static const u32 iwl_mvm_cdev_budgets[] = {
- 2000, /* cooling state 0 */
- 1800, /* cooling state 1 */
- 1600, /* cooling state 2 */
- 1400, /* cooling state 3 */
- 1200, /* cooling state 4 */
- 1000, /* cooling state 5 */
- 900, /* cooling state 6 */
- 800, /* cooling state 7 */
- 700, /* cooling state 8 */
- 650, /* cooling state 9 */
- 600, /* cooling state 10 */
- 550, /* cooling state 11 */
- 500, /* cooling state 12 */
- 450, /* cooling state 13 */
- 400, /* cooling state 14 */
- 350, /* cooling state 15 */
- 300, /* cooling state 16 */
- 250, /* cooling state 17 */
- 200, /* cooling state 18 */
- 150, /* cooling state 19 */
+ 2400, /* cooling state 0 */
+ 2000, /* cooling state 1 */
+ 1800, /* cooling state 2 */
+ 1600, /* cooling state 3 */
+ 1400, /* cooling state 4 */
+ 1200, /* cooling state 5 */
+ 1000, /* cooling state 6 */
+ 900, /* cooling state 7 */
+ 800, /* cooling state 8 */
+ 700, /* cooling state 9 */
+ 650, /* cooling state 10 */
+ 600, /* cooling state 11 */
+ 550, /* cooling state 12 */
+ 500, /* cooling state 13 */
+ 450, /* cooling state 14 */
+ 400, /* cooling state 15 */
+ 350, /* cooling state 16 */
+ 300, /* cooling state 17 */
+ 250, /* cooling state 18 */
+ 200, /* cooling state 19 */
+ 150, /* cooling state 20 */
};
int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
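The net effect of the reshuffle: the table gains a new 2400 mW state 0, every previous budget shifts down by one state (state 2 now budgets 1800 mW where it used to budget 1600 mW), and the 150 mW floor moves from state 19 to a new state 20.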
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 8a059da7a1fa..dc5c02fbc65a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -341,8 +341,11 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
rate_idx = rate_lowest_index(
&mvm->nvm_data->bands[info->band], sta);
- /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == NL80211_BAND_5GHZ)
+ /*
+ * For non 2 GHZ band, remap mac80211 rate
+ * indices into driver indices
+ */
+ if (info->band != NL80211_BAND_2GHZ)
rate_idx += IWL_FIRST_OFDM_RATE;
/* For 2.4 GHZ band, check that there is no need to remap */
@@ -547,7 +550,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
}
if (mvm->trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) {
+ IWL_DEVICE_FAMILY_AX210) {
struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
cmd->offload_assist |= cpu_to_le32(offload_assist);
@@ -935,7 +938,12 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
!(mvmsta->amsdu_enabled & BIT(tid)))
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
- max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid);
+ /*
+ * Take the min of the ieee80211 station's and the mvm station's A-MSDU limits
+ */
+ max_amsdu_len =
+ min_t(unsigned int, sta->max_amsdu_len,
+ iwl_mvm_max_amsdu_size(mvm, sta, tid));
/*
* Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
@@ -2051,7 +2059,7 @@ int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
if (iwl_mvm_has_new_tx_api(mvm))
return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
- 0xff | BIT(IWL_MGMT_TID), flags);
+ 0xffff, flags);
if (internal)
return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 8686107da116..6096276cb0d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -217,7 +217,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
int band_offset = 0;
/* Legacy rate format, search for match in table */
- if (band == NL80211_BAND_5GHZ)
+ if (band != NL80211_BAND_2GHZ)
band_offset = IWL_FIRST_OFDM_RATE;
for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
if (fw_rate_idx_to_plcp[idx] == rate)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 74980382e64c..a4e09a5b1816 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -55,6 +55,66 @@
#include "internal.h"
#include "iwl-prph.h"
+static void
+iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
+ struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
+ u32 *control_flags)
+{
+ enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
+ u32 dbg_flags = 0;
+
+ if (!iwl_trans_dbg_ini_valid(trans)) {
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
+
+ iwl_pcie_alloc_fw_monitor(trans, 0);
+
+ if (fw_mon->size) {
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM buffer destination\n");
+
+ dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical);
+ dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size);
+ }
+
+ goto out;
+ }
+
+ fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) ==
+ IWL_FW_INI_LOCATION_SRAM_PATH) {
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
+
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying SMEM buffer destination\n");
+
+ goto out;
+ }
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) ==
+ IWL_FW_INI_LOCATION_DRAM_PATH &&
+ trans->dbg.fw_mon_ini[alloc_id].num_frags) {
+ struct iwl_dram_data *frag =
+ &trans->dbg.fw_mon_ini[alloc_id].frags[0];
+
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM destination (alloc_id=%u)\n",
+ alloc_id);
+
+ dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
+ dbg_cfg->hwm_size = cpu_to_le32(frag->size);
+ }
+
+out:
+ if (dbg_flags)
+ *control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
+}
+
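To summarize the selection logic above: without valid ini TLVs the helper keeps the legacy behaviour and points the HWM config at the allocated fw_mon DRAM buffer; with ini TLVs it either picks the internal SMEM destination (no address programmed) or, for a DRAM path with at least one allocated DBGC1 fragment, programs the first fragment's address and size. In every other case dbg_flags stays 0 and IWL_PRPH_SCRATCH_EARLY_DEBUG_EN is never set.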
int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw)
{
@@ -86,24 +146,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
IWL_PRPH_SCRATCH_MTR_MODE |
(IWL_PRPH_MTR_FORMAT_256B &
- IWL_PRPH_SCRATCH_MTR_FORMAT) |
- IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
- IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
- prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+ IWL_PRPH_SCRATCH_MTR_FORMAT);
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
cpu_to_le64(trans_pcie->rxq->bd_dma);
- /* Configure debug, for integration */
- if (!iwl_trans_dbg_ini_valid(trans))
- iwl_pcie_alloc_fw_monitor(trans, 0);
- if (trans->dbg.num_blocks) {
- prph_sc_ctrl->hwm_cfg.hwm_base_addr =
- cpu_to_le64(trans->dbg.fw_mon[0].physical);
- prph_sc_ctrl->hwm_cfg.hwm_size =
- cpu_to_le32(trans->dbg.fw_mon[0].size);
- }
+ iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
+ &control_flags);
+ prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 1047d48beaa5..3da2c7e38ffa 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -166,7 +166,7 @@ struct iwl_rx_completion_desc {
* @id: queue index
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
* Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
- * In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
+ * In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
* @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
@@ -264,7 +264,7 @@ static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
__le16 *rb_stts = rxq->rb_stts;
return READ_ONCE(*rb_stts);
@@ -702,9 +702,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs);
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
u8 idx)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 19dd075f2f63..4bba6b8a863c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -200,8 +200,8 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
*/
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
- /* TODO: remove this for 22560 once fw does it */
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ /* TODO: remove this once fw does it */
iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -247,11 +247,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22560)
- iwl_write32(trans, HBUS_TARG_WRPTR,
- (rxq->write_actual |
- ((FIRST_RX_QUEUE + rxq->id) << 16)));
- else if (trans->trans_cfg->mq_rx_supported)
+ if (trans->trans_cfg->mq_rx_supported)
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
else
@@ -279,7 +275,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
struct iwl_rxq *rxq,
struct iwl_rx_mem_buffer *rxb)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_rx_transfer_desc *bd = rxq->bd;
BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
@@ -691,7 +687,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
{
struct device *dev = trans->dev;
bool use_rx_td = (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560);
+ IWL_DEVICE_FAMILY_AX210);
int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
if (rxq->bd)
@@ -712,7 +708,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
if (rxq->tr_tail)
@@ -736,7 +732,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
int i;
int free_size;
bool use_rx_td = (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560);
+ IWL_DEVICE_FAMILY_AX210);
size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
sizeof(struct iwl_rb_status);
@@ -784,11 +780,6 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
&rxq->cr_tail_dma, GFP_KERNEL);
if (!rxq->cr_tail)
goto err;
- /*
- * W/A 22560 device step Z0 must be non zero bug
- * TODO: remove this when stop supporting Z0
- */
- *rxq->cr_tail = cpu_to_le16(500);
return 0;
@@ -808,7 +799,7 @@ int iwl_pcie_rx_alloc(struct iwl_trans *trans)
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i, ret;
size_t rb_stts_size = trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560 ?
+ IWL_DEVICE_FAMILY_AX210 ?
sizeof(__le16) : sizeof(struct iwl_rb_status);
if (WARN_ON(trans_pcie->rxq))
@@ -1074,8 +1065,9 @@ int _iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->read = 0;
rxq->write = 0;
rxq->write_actual = 0;
- memset(rxq->rb_stts, 0, (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ memset(rxq->rb_stts, 0,
+ (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(__le16) : sizeof(struct iwl_rb_status));
iwl_pcie_rx_init_rxb_lists(rxq);
@@ -1152,7 +1144,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i;
size_t rb_stts_size = trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560 ?
+ IWL_DEVICE_FAMILY_AX210 ?
sizeof(__le16) : sizeof(struct iwl_rb_status);
/*
@@ -1347,7 +1339,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
page_stolen |= rxcb._page_stolen;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
break;
offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
}
@@ -1399,7 +1391,7 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
}
/* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
else
vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
@@ -1515,7 +1507,7 @@ out:
/* Backtrack one entry */
rxq->read = i;
/* update cr tail with the rxq read pointer */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
*rxq->cr_tail = cpu_to_le16(r);
spin_unlock(&rxq->lock);
@@ -2152,8 +2144,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/* Error detected by uCode */
if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
+ (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
IWL_ERR(trans,
"Microcode SW error detected. Restarting 0x%X.\n",
inta_fh);
@@ -2185,17 +2176,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
}
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22560 &&
- inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
- /* Reflect IML transfer status */
- int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
-
- IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
- if (res == IWL_IMAGE_RESP_FAIL) {
- isr_stats->sw++;
- iwl_pcie_irq_handle_error(trans);
- }
- } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
u32 sleep_notif =
le32_to_cpu(trans_pcie->prph_info->sleep_notif);
if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index ca3bb4d65b00..0252716c0b24 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -193,7 +193,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
}
iwl_pcie_ctxt_info_free_paging(trans);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
iwl_pcie_ctxt_info_gen3_free(trans);
else
iwl_pcie_ctxt_info_free(trans);
@@ -365,7 +365,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
else
ret = iwl_pcie_ctxt_info_init(trans, fw);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 6961f00ff812..af9bc6b64542 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -190,32 +190,36 @@ static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
- int i;
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
- for (i = 0; i < trans->dbg.num_blocks; i++) {
- dma_free_coherent(trans->dev, trans->dbg.fw_mon[i].size,
- trans->dbg.fw_mon[i].block,
- trans->dbg.fw_mon[i].physical);
- trans->dbg.fw_mon[i].block = NULL;
- trans->dbg.fw_mon[i].physical = 0;
- trans->dbg.fw_mon[i].size = 0;
- trans->dbg.num_blocks--;
- }
+ if (!fw_mon->size)
+ return;
+
+ dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
+ fw_mon->physical);
+
+ fw_mon->block = NULL;
+ fw_mon->physical = 0;
+ fw_mon->size = 0;
}
static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
u8 max_power, u8 min_power)
{
- void *cpu_addr = NULL;
- dma_addr_t phys = 0;
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
+ void *block = NULL;
+ dma_addr_t physical = 0;
u32 size = 0;
u8 power;
+ if (fw_mon->size)
+ return;
+
for (power = max_power; power >= min_power; power--) {
size = BIT(power);
- cpu_addr = dma_alloc_coherent(trans->dev, size, &phys,
- GFP_KERNEL | __GFP_NOWARN);
- if (!cpu_addr)
+ block = dma_alloc_coherent(trans->dev, size, &physical,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!block)
continue;
IWL_INFO(trans,
@@ -224,7 +228,7 @@ static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
break;
}
- if (WARN_ON_ONCE(!cpu_addr))
+ if (WARN_ON_ONCE(!block))
return;
if (power != max_power)
@@ -233,10 +237,9 @@ static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
(unsigned long)BIT(power - 10),
(unsigned long)BIT(max_power - 10));
- trans->dbg.fw_mon[trans->dbg.num_blocks].block = cpu_addr;
- trans->dbg.fw_mon[trans->dbg.num_blocks].physical = phys;
- trans->dbg.fw_mon[trans->dbg.num_blocks].size = size;
- trans->dbg.num_blocks++;
+ fw_mon->block = block;
+ fw_mon->physical = physical;
+ fw_mon->size = size;
}
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
@@ -253,11 +256,7 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
max_power))
return;
- /*
- * This function allocats the default fw monitor.
- * The optional additional ones will be allocated in runtime
- */
- if (trans->dbg.num_blocks)
+ if (trans->dbg.fw_mon.size)
return;
iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
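
This trans.c hunk and the ones below collapse the dbg.fw_mon[] array plus its num_blocks counter into a single struct iwl_dram_data, with size != 0 doubling as the "allocated" flag. A minimal sketch of the resulting ownership pattern, assuming the field names from this diff and the standard kernel DMA API:

/* kernel-context sketch: free is a no-op unless something is allocated,
 * so the alloc/free pair becomes idempotent and needs no separate counter
 */
struct iwl_dram_data {
	void *block;		/* CPU address from dma_alloc_coherent() */
	dma_addr_t physical;	/* matching bus address */
	u32 size;		/* 0 == not allocated */
};

static void fw_mon_free(struct device *dev, struct iwl_dram_data *fw_mon)
{
	if (!fw_mon->size)
		return;

	dma_free_coherent(dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);
	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}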
@@ -891,24 +890,51 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
return 0;
}
+static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
+{
+ enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
+ &trans->dbg.fw_mon_cfg[alloc_id];
+ struct iwl_dram_data *frag;
+
+ if (!iwl_trans_dbg_ini_valid(trans))
+ return;
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) ==
+ IWL_FW_INI_LOCATION_SRAM_PATH) {
+ IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
+ /* set sram monitor by enabling bit 7 */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
+
+ return;
+ }
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) !=
+ IWL_FW_INI_LOCATION_DRAM_PATH ||
+ !trans->dbg.fw_mon_ini[alloc_id].num_frags)
+ return;
+
+ frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];
+
+ IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
+ alloc_id);
+
+ iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
+ frag->physical >> MON_BUFF_SHIFT_VER2);
+ iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
+ (frag->physical + frag->size - 256) >>
+ MON_BUFF_SHIFT_VER2);
+}
+
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
+ const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
int i;
if (iwl_trans_dbg_ini_valid(trans)) {
- if (!trans->dbg.num_blocks)
- return;
-
- IWL_DEBUG_FW(trans,
- "WRT: Applying DRAM buffer[0] destination\n");
- iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
- trans->dbg.fw_mon[0].physical >>
- MON_BUFF_SHIFT_VER2);
- iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size - 256) >>
- MON_BUFF_SHIFT_VER2);
+ iwl_pcie_apply_destination_ini(trans);
return;
}
@@ -959,20 +985,17 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
}
monitor:
- if (dest->monitor_mode == EXTERNAL_MODE && trans->dbg.fw_mon[0].size) {
+ if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
- trans->dbg.fw_mon[0].physical >>
- dest->base_shift);
+ fw_mon->physical >> dest->base_shift);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size - 256) >>
- dest->end_shift);
+ (fw_mon->physical + fw_mon->size -
+ 256) >> dest->end_shift);
else
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size) >>
- dest->end_shift);
+ (fw_mon->physical + fw_mon->size) >>
+ dest->end_shift);
}
}
@@ -1006,14 +1029,14 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
/* supported for 7000 only for the moment */
if (iwlwifi_mod_params.fw_monitor &&
trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
- iwl_pcie_alloc_fw_monitor(trans, 0);
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
- if (trans->dbg.fw_mon[0].size) {
+ iwl_pcie_alloc_fw_monitor(trans, 0);
+ if (fw_mon->size) {
iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
- trans->dbg.fw_mon[0].physical >> 4);
+ fw_mon->physical >> 4);
iwl_write_prph(trans, MON_BUFF_END_ADDR,
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size) >> 4);
+ (fw_mon->physical + fw_mon->size) >> 4);
}
} else if (iwl_pcie_dbg_on(trans)) {
iwl_pcie_apply_destination(trans);
@@ -1112,30 +1135,12 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
-static struct iwl_causes_list causes_list_v2[] = {
- {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
- {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
- {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
- {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
- {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
- {MSIX_HW_INT_CAUSES_REG_IPC, CSR_MSIX_HW_INT_MASK_AD, 0x11},
- {MSIX_HW_INT_CAUSES_REG_SW_ERR_V2, CSR_MSIX_HW_INT_MASK_AD, 0x15},
- {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
- {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
- {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
- {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
- {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
- {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
- {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
-};
-
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
- int i, arr_size =
- (trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22560) ?
- ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
+ int i, arr_size = ARRAY_SIZE(causes_list);
+ struct iwl_causes_list *causes = causes_list;
/*
* Access all non RX causes and map them to the default irq.
@@ -1143,11 +1148,6 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
* the first interrupt vector will serve non-RX and FBQ causes.
*/
for (i = 0; i < arr_size; i++) {
- struct iwl_causes_list *causes =
- (trans->trans_cfg->device_family !=
- IWL_DEVICE_FAMILY_22560) ?
- causes_list : causes_list_v2;
-
iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
iwl_clear_bit(trans, causes[i].mask_reg,
causes[i].cause_num);
@@ -1871,7 +1871,7 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return 0x00FFFFFF;
else
return 0x000FFFFF;
@@ -2559,7 +2559,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
char *buf;
int pos = 0, i, ret;
- size_t bufsz = sizeof(buf);
+ size_t bufsz;
bufsz = sizeof(char) * 121 * trans->num_rx_queues;
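
The deleted initializer was a sizeof-on-a-pointer bug: buf is a char *, so sizeof(buf) yields the pointer width (8 on 64-bit builds), not the length of any buffer, and bufsz briefly held a nonsense value. The fix simply defers the assignment until the real size is known. A runnable illustration of the pitfall:

#include <stdio.h>

int main(void)
{
	char *buf = NULL;
	char storage[121];

	/* sizeof a pointer measures the pointer, not what it points at */
	printf("sizeof(buf)     = %zu\n", sizeof(buf));		/* 8 on x86-64 */
	printf("sizeof(storage) = %zu\n", sizeof(storage));	/* 121 */
	return 0;
}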
@@ -2801,7 +2801,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- void *cpu_addr = (void *)trans->dbg.fw_mon[0].block, *curr_buf;
+ void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
struct cont_rec *data = &trans_pcie->fw_mon_data;
u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
ssize_t size, bytes_copied = 0;
@@ -2840,7 +2840,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
write_ptr < data->prev_wr_ptr) {
- size = trans->dbg.fw_mon[0].size - data->prev_wr_ptr;
+ size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
curr_buf = cpu_addr + data->prev_wr_ptr;
b_full = iwl_write_to_user_buf(user_buf, count,
curr_buf, &size,
@@ -3087,10 +3087,11 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
struct iwl_fw_error_dump_data **data,
u32 monitor_len)
{
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
u32 len = 0;
if (trans->dbg.dest_tlv ||
- (trans->dbg.num_blocks &&
+ (fw_mon->size &&
(trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
@@ -3101,12 +3102,9 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
len += sizeof(**data) + sizeof(*fw_mon_data);
- if (trans->dbg.num_blocks) {
- memcpy(fw_mon_data->data,
- trans->dbg.fw_mon[0].block,
- trans->dbg.fw_mon[0].size);
-
- monitor_len = trans->dbg.fw_mon[0].size;
+ if (fw_mon->size) {
+ memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
+ monitor_len = fw_mon->size;
} else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
/*
@@ -3145,11 +3143,11 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
- if (trans->dbg.num_blocks) {
+ if (trans->dbg.fw_mon.size) {
*len += sizeof(struct iwl_fw_error_dump_data) +
sizeof(struct iwl_fw_error_dump_fw_mon) +
- trans->dbg.fw_mon[0].size;
- return trans->dbg.fw_mon[0].size;
+ trans->dbg.fw_mon.size;
+ return trans->dbg.fw_mon.size;
} else if (trans->dbg.dest_tlv) {
u32 base, end, cfg_reg, monitor_len;
@@ -3604,6 +3602,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
mutex_init(&trans_pcie->fw_mon_data.mutex);
#endif
+ iwl_dbg_tlv_init(trans);
+
return trans;
out_free_ict:
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 8894027429d6..8323fa7c0762 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -86,9 +86,9 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
*/
-void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
@@ -113,14 +113,14 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
*/
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
- /* Starting from 22560, the HW expects bytes */
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ /* Starting from AX210, the HW expects bytes */
WARN_ON(trans_pcie->bc_table_dword);
WARN_ON(len > 0x3FFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
} else {
- /* Until 22560, the HW expects DW */
+ /* Before AX210, the HW expects DW */
WARN_ON(!trans_pcie->bc_table_dword);
len = DIV_ROUND_UP(len, 4);
WARN_ON(len > 0xFFF);
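
Beyond the rename, the two WARN_ONs document the real format split: hardware before AX210 reads this table in dwords (hence the DIV_ROUND_UP by 4, capped at 0xFFF), while AX210 and later read raw bytes capped at 0x3FFF with the fetch-chunk count packed into the top two bits. A sketch of the AX210 encoding as shown in this hunk (the cpu_to_le16() conversion is omitted):

#include <stdint.h>

static uint16_t encode_bc_ent_ax210(uint16_t len_bytes, uint16_t chunks)
{
	/* bits 13..0: length in bytes (<= 0x3FFF); bits 15..14: chunks */
	return (uint16_t)(len_bytes | (chunks << 14));
}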
@@ -251,27 +251,23 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
- u16 length, iv_len, amsdu_pad;
+ u16 length, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
struct page **page_ptr;
struct tso_t tso;
- /* if the packet is protected, then it must be CCMP or GCMP */
- iv_len = ieee80211_has_protected(hdr->frame_control) ?
- IEEE80211_CCMP_HDR_LEN : 0;
-
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
&dev_cmd->hdr, start_len, 0);
ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
- total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
+ total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
amsdu_pad = 0;
/* total amount of header we may need for this A-MSDU */
hdr_room = DIV_ROUND_UP(total_len, mss) *
- (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
+ (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
hdr_page = get_page_hdr(trans, hdr_room);
@@ -282,14 +278,12 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
start_hdr = hdr_page->pos;
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
*page_ptr = hdr_page->page;
- memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
- hdr_page->pos += iv_len;
/*
- * Pull the ieee80211 header + IV to be able to use TSO core,
+ * Pull the ieee80211 header to be able to use TSO core,
* we will restore it for the tx_status flow.
*/
- skb_pull(skb, hdr_len + iv_len);
+ skb_pull(skb, hdr_len);
/*
* Remove the length of all the headers that we don't actually
@@ -364,8 +358,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
}
}
- /* re -add the WiFi header and IV */
- skb_push(skb, hdr_len + iv_len);
+ /* re-add the WiFi header */
+ skb_push(skb, hdr_len);
return 0;
@@ -547,7 +541,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
memset(tfd, 0, sizeof(*tfd));
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
len = sizeof(struct iwl_tx_cmd_gen2);
else
len = sizeof(struct iwl_tx_cmd_gen3);
@@ -629,7 +623,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
return -1;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
(void *)dev_cmd->payload;
@@ -1130,7 +1124,7 @@ int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
return -ENOMEM;
ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
(trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl));
if (ret) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 4806a04cec8c..b710b8a25b54 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -949,7 +949,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
bc_tbls_size *= (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 31ae6c4be3a7..03738107fd10 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -769,8 +769,8 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
- "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
+ "%llu\n");
static int hwsim_write_simulate_radar(void *dat, u64 val)
{
@@ -781,8 +781,8 @@ static int hwsim_write_simulate_radar(void *dat, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_simulate_radar, NULL,
- hwsim_write_simulate_radar, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(hwsim_simulate_radar, NULL,
+ hwsim_write_simulate_radar, "%llu\n");
static int hwsim_fops_group_read(void *dat, u64 *val)
{
@@ -798,9 +798,9 @@ static int hwsim_fops_group_write(void *dat, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
- hwsim_fops_group_read, hwsim_fops_group_write,
- "%llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_group,
+ hwsim_fops_group_read, hwsim_fops_group_write,
+ "%llx\n");
static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
struct net_device *dev)
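
Context for the three conversions above: fops generated by DEFINE_DEBUGFS_ATTRIBUTE wrap the get/set handlers with debugfs_file_get()/debugfs_file_put(), so the files stay safe against concurrent removal without the proxy fops that debugfs otherwise allocates for DEFINE_SIMPLE_ATTRIBUTE users; the usual companion is debugfs_create_file_unsafe(). A kernel-context sketch with hypothetical names:

static u64 demo_val;

static int demo_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int demo_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_demo, demo_get, demo_set, "%llu\n");

/* registered without the removal-protection proxy:
 *	debugfs_create_file_unsafe("demo", 0600, parent, &demo_val,
 *				   &fops_demo);
 */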
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 242d8845da3f..30f1025ecb9b 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1179,6 +1179,10 @@ static int if_sdio_probe(struct sdio_func *func,
spin_lock_init(&card->lock);
card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0);
+ if (unlikely(!card->workqueue)) {
+ ret = -ENOMEM;
+ goto err_queue;
+ }
INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
init_waitqueue_head(&card->pwron_waitq);
@@ -1230,6 +1234,7 @@ err_activate_card:
lbs_remove_card(priv);
free:
destroy_workqueue(card->workqueue);
+err_queue:
while (card->packets) {
packet = card->packets;
card->packets = card->packets->next;
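
alloc_workqueue() can return NULL, and the old probe path used the queue unchecked; worse, a later failure would then have reached destroy_workqueue() with that NULL. The new err_queue label sits after destroy_workqueue(), so a failed allocation skips exactly its own cleanup. A runnable model of that label placement (resource names hypothetical):

#include <stdio.h>
#include <stdlib.h>

static int probe_demo(int fail_step)
{
	char *packets, *workqueue;
	int ret = 0;

	packets = malloc(16);			/* earlier allocation */
	if (!packets)
		return -1;

	workqueue = (fail_step == 1) ? NULL : malloc(16);
	if (!workqueue) {
		ret = -1;
		goto err_queue;			/* skip the free below */
	}

	if (fail_step == 2) {			/* some later failure */
		ret = -1;
		goto free_queue;
	}
	return 0;				/* success: caller owns both */

free_queue:
	free(workqueue);			/* only reached if it exists */
err_queue:
	free(packets);
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", probe_demo(0), probe_demo(1), probe_demo(2));
	return 0;
}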
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index 2747c957d18c..44c8a550da4c 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -1003,7 +1003,6 @@ static int lbs_add_mesh(struct lbs_private *priv)
if (priv->mesh_tlv) {
sprintf(mesh_wdev->ssid, "mesh");
mesh_wdev->mesh_id_up_len = 4;
- ret = 1;
}
mesh_wdev->netdev = mesh_dev;
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index eff06d59e9df..fc1706d0647d 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -687,8 +687,11 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
skb_put(skb, MAX_EVENT_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE))
+ PCI_DMA_FROMDEVICE)) {
+ kfree_skb(skb);
+ kfree(card->evtbd_ring_vbase);
return -1;
+ }
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@@ -1029,8 +1032,10 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
}
skb_put(skb, MWIFIEX_UPLD_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE))
+ PCI_DMA_FROMDEVICE)) {
+ kfree_skb(skb);
return -1;
+ }
card->cmdrsp_buf = skb;
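
Both mwifiex hunks fix the same leak: mwifiex_map_pci_memory() can fail after the skb exists, and the old code returned -1 without releasing it (in the event-ring case, without releasing the just-allocated ring either). The rule being enforced, sketched with the names from the second hunk (the allocation call is assumed):

	skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE);
	if (!skb)
		return -1;
	skb_put(skb, MWIFIEX_UPLD_SIZE);

	if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
				   PCI_DMA_FROMDEVICE)) {
		kfree_skb(skb);		/* still ours; nothing else holds it */
		return -1;
	}
	card->cmdrsp_buf = skb;		/* only publish a mapped skb */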
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 593c594982cb..98f942b797f7 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1270,7 +1270,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_FH_PARAMS:
- if (element_len + 2 < sizeof(*fh_param_set))
+ if (total_ie_len < sizeof(*fh_param_set))
return -EINVAL;
fh_param_set =
(struct ieee_types_fh_param_set *) current_ptr;
@@ -1280,7 +1280,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_DS_PARAMS:
- if (element_len + 2 < sizeof(*ds_param_set))
+ if (total_ie_len < sizeof(*ds_param_set))
return -EINVAL;
ds_param_set =
(struct ieee_types_ds_param_set *) current_ptr;
@@ -1293,7 +1293,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_CF_PARAMS:
- if (element_len + 2 < sizeof(*cf_param_set))
+ if (total_ie_len < sizeof(*cf_param_set))
return -EINVAL;
cf_param_set =
(struct ieee_types_cf_param_set *) current_ptr;
@@ -1303,7 +1303,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_IBSS_PARAMS:
- if (element_len + 2 < sizeof(*ibss_param_set))
+ if (total_ie_len < sizeof(*ibss_param_set))
return -EINVAL;
ibss_param_set =
(struct ieee_types_ibss_param_set *)
@@ -1460,10 +1460,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
}
- current_ptr += element_len + 2;
-
- /* Need to account for IE ID and IE Len */
- bytes_left -= (element_len + 2);
+ current_ptr += total_ie_len;
+ bytes_left -= total_ie_len;
} /* while (bytes_left > 2) */
return ret;
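
The parser now computes total_ie_len (the two-byte ID/length header plus the body) once and uses it both in the per-element minimum-size checks and for the cursor and bytes_left advance at the bottom, instead of re-deriving element_len + 2 in each spot, so the length that is validated is exactly the length that is consumed. A runnable model of that walk:

#include <stdio.h>

/* each element: 2-byte header (id, len) followed by len bytes of body */
struct tlv { unsigned char id, len; };

static void walk(const unsigned char *buf, size_t bytes_left)
{
	while (bytes_left >= sizeof(struct tlv)) {
		const struct tlv *hdr = (const struct tlv *)buf;
		size_t total_ie_len = hdr->len + sizeof(*hdr);

		if (total_ie_len > bytes_left)
			return;		/* truncated element: stop */
		printf("IE %u, %u byte(s)\n", hdr->id, hdr->len);
		buf += total_ie_len;	/* advance by what was checked */
		bytes_left -= total_ie_len;
	}
}

int main(void)
{
	const unsigned char ies[] = { 0, 4, 't', 'e', 's', 't', 3, 1, 11 };

	walk(ies, sizeof(ies));
	return 0;
}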
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
index 92305bd31aa1..4209209ac940 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
@@ -77,10 +77,7 @@ int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
if (vif_idx == i) {
force_update = !!dev->beacons[i] ^ !!skb;
-
- if (dev->beacons[i])
- dev_kfree_skb(dev->beacons[i]);
-
+ dev_kfree_skb(dev->beacons[i]);
dev->beacons[i] = skb;
__mt76x02_mac_set_beacon(dev, bcn_idx, skb);
} else if (force_update && dev->beacons[i]) {
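
The removed guard was redundant: dev_kfree_skb(), like kfree_skb() and kfree(), is a no-op on NULL, so the slot can be released and overwritten unconditionally:

	dev_kfree_skb(dev->beacons[i]);	/* NULL-safe */
	dev->beacons[i] = skb;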
diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
index 5e549831370c..300242bce799 100644
--- a/drivers/net/wireless/mediatek/mt7601u/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
@@ -27,7 +27,7 @@ mt76_reg_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
static int
mt7601u_ampdu_stat_read(struct seq_file *file, void *data)
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index 06f5702ab4bd..d863ab4a66c9 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -213,7 +213,7 @@ int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev)
do {
val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION);
- if (val && ~val)
+ if (val && val != 0xff)
break;
} while (--i);
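
The one-liner above fixes an integer-promotion bug: ~val on a value promoted to int is zero only when every bit of the promoted value is set, so a not-ready BBP read of 0xff still passed the old `val && ~val` test and ended the wait loop early. A runnable demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int val = 0xff;	/* 8-bit register read, promoted */

	printf("%d\n", val && ~val);		/* 1: old test passes 0xff */
	printf("%d\n", val && val != 0xff);	/* 0: fixed test rejects it */
	return 0;
}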
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index d90016125dfc..aa0ed0f2b973 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -897,6 +897,45 @@ static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
+static int qtnf_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int *dbm)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
+ int ret;
+
+ ret = qtnf_cmd_get_tx_power(vif, dbm);
+ if (ret)
+ pr_err("MAC%u: failed to get Tx power\n", vif->mac->macid);
+
+ return ret;
+}
+
+static int qtnf_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type, int mbm)
+{
+ struct qtnf_vif *vif;
+ int ret;
+
+ if (wdev) {
+ vif = qtnf_netdev_get_priv(wdev->netdev);
+ } else {
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not configured\n",
+ mac->macid);
+ return -EFAULT;
+ }
+ }
+
+ ret = qtnf_cmd_set_tx_power(vif, type, mbm);
+ if (ret)
+ pr_err("MAC%u: failed to set Tx power\n", vif->mac->macid);
+
+ return ret;
+}
+
#ifdef CONFIG_PM
static int qtnf_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
{
@@ -991,6 +1030,8 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
.start_radar_detection = qtnf_start_radar_detection,
.set_mac_acl = qtnf_set_mac_acl,
.set_power_mgmt = qtnf_set_power_mgmt,
+ .get_tx_power = qtnf_get_tx_power,
+ .set_tx_power = qtnf_set_tx_power,
#ifdef CONFIG_PM
.suspend = qtnf_suspend,
.resume = qtnf_resume,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index dc0c7244b60e..61bda34e2ac2 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -83,6 +83,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
struct qlink_cmd *cmd;
struct qlink_resp *resp = NULL;
struct sk_buff *resp_skb = NULL;
+ int resp_res = 0;
u16 cmd_id;
u8 mac_id;
u8 vif_id;
@@ -113,6 +114,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
}
resp = (struct qlink_resp *)resp_skb->data;
+ resp_res = le16_to_cpu(resp->result);
ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id,
const_resp_size);
if (ret)
@@ -128,8 +130,8 @@ out:
else
consume_skb(resp_skb);
- if (!ret && resp)
- return qtnf_cmd_resp_result_decode(le16_to_cpu(resp->result));
+ if (!ret)
+ return qtnf_cmd_resp_result_decode(resp_res);
pr_warn("VIF%u.%u: cmd 0x%.4X failed: %d\n",
mac_id, vif_id, cmd_id, ret);
@@ -2641,6 +2643,71 @@ out:
return ret;
}
+int qtnf_cmd_get_tx_power(const struct qtnf_vif *vif, int *dbm)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ const struct qlink_resp_txpwr *resp;
+ struct sk_buff *resp_skb = NULL;
+ struct qlink_cmd_txpwr *cmd;
+ struct sk_buff *cmd_skb;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_TXPWR, sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_txpwr *)cmd_skb->data;
+ cmd->op_type = QLINK_TXPWR_GET;
+
+ qtnf_bus_lock(bus);
+
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
+ sizeof(*resp), NULL);
+ if (ret)
+ goto out;
+
+ resp = (const struct qlink_resp_txpwr *)resp_skb->data;
+ *dbm = MBM_TO_DBM(le32_to_cpu(resp->txpwr));
+
+out:
+ qtnf_bus_unlock(bus);
+ consume_skb(resp_skb);
+
+ return ret;
+}
+
+int qtnf_cmd_set_tx_power(const struct qtnf_vif *vif,
+ enum nl80211_tx_power_setting type, int mbm)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ const struct qlink_resp_txpwr *resp;
+ struct sk_buff *resp_skb = NULL;
+ struct qlink_cmd_txpwr *cmd;
+ struct sk_buff *cmd_skb;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_TXPWR, sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_txpwr *)cmd_skb->data;
+ cmd->op_type = QLINK_TXPWR_SET;
+ cmd->txpwr_setting = type;
+ cmd->txpwr = cpu_to_le32(mbm);
+
+ qtnf_bus_lock(bus);
+
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
+ sizeof(*resp), NULL);
+
+ qtnf_bus_unlock(bus);
+ consume_skb(resp_skb);
+
+ return ret;
+}
+
int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
const struct cfg80211_wowlan *wowl)
{
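
The resp_res change in qtnf_cmd_send_with_reply() is a use-after-free fix: resp points into resp_skb->data, and the out path frees or consumes that skb before the old code dereferenced resp->result. Whatever is needed after the skb goes away must be copied out first; the shape of the fix:

	resp = (struct qlink_resp *)resp_skb->data;
	resp_res = le16_to_cpu(resp->result);	/* copy while the skb is alive */

	consume_skb(resp_skb);			/* resp dangles from here on */

	if (!ret)
		return qtnf_cmd_resp_result_decode(resp_res);	/* safe */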
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index 88d7a3cd90d2..e0de65261213 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -70,6 +70,9 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
const struct cfg80211_acl_data *params);
int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout);
+int qtnf_cmd_get_tx_power(const struct qtnf_vif *vif, int *dbm);
+int qtnf_cmd_set_tx_power(const struct qtnf_vif *vif,
+ enum nl80211_tx_power_setting type, int mbm);
int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
const struct cfg80211_wowlan *wowl);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 8d699cc03d26..8116b224c946 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -67,6 +67,14 @@ static int qtnf_netdev_close(struct net_device *ndev)
return 0;
}
+static void qtnf_packet_send_hi_pri(struct sk_buff *skb)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev);
+
+ skb_queue_tail(&vif->high_pri_tx_queue, skb);
+ queue_work(vif->mac->bus->hprio_workqueue, &vif->high_pri_tx_work);
+}
+
/* Netdev handler for data transmission.
*/
static netdev_tx_t
@@ -107,6 +115,12 @@ qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* tx path is enabled: reset vif timeout */
vif->cons_tx_timeout_cnt = 0;
+ if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
+ qtnf_packet_send_hi_pri(skb);
+ qtnf_update_tx_stats(ndev, skb);
+ return NETDEV_TX_OK;
+ }
+
return qtnf_bus_data_tx(mac->bus, skb);
}
@@ -841,15 +855,6 @@ void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(qtnf_update_tx_stats);
-void qtnf_packet_send_hi_pri(struct sk_buff *skb)
-{
- struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev);
-
- skb_queue_tail(&vif->high_pri_tx_queue, skb);
- queue_work(vif->mac->bus->hprio_workqueue, &vif->high_pri_tx_work);
-}
-EXPORT_SYMBOL_GPL(qtnf_packet_send_hi_pri);
-
struct dentry *qtnf_get_debugfs_dir(void)
{
return qtnf_debugfs_dir;
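
Moving the ETH_P_PAE test from the Topaz backend into the shared qtnf_netdev_hard_start_xmit() gives Pearl the same treatment Topaz already had: EAPOL frames ride the high-priority work queue instead of the bulk data path, since a key-handshake frame stuck behind a full TX ring can fail the 4-way handshake. The classification, as it now runs for every interface:

	if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
		qtnf_packet_send_hi_pri(skb);	/* to bus->hprio_workqueue */
		qtnf_update_tx_stats(ndev, skb);
		return NETDEV_TX_OK;		/* skb consumed here */
	}

	return qtnf_bus_data_tx(mac->bus, skb);	/* ordinary data path */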
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 322858df600c..e3feea31191e 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -152,7 +152,6 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev);
void qtnf_netdev_updown(struct net_device *ndev, bool up);
void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted);
-void qtnf_packet_send_hi_pri(struct sk_buff *skb);
struct dentry *qtnf_get_debugfs_dir(void);
static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index b57c8c18a8d0..51af93bdf06e 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -171,8 +171,9 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
return -EPROTO;
}
- pr_debug("VIF%u.%u: BSSID:%pM status:%u\n",
- vif->mac->macid, vif->vifid, join_info->bssid, status);
+ pr_debug("VIF%u.%u: BSSID:%pM chan:%u status:%u\n",
+ vif->mac->macid, vif->vifid, join_info->bssid,
+ le16_to_cpu(join_info->chan.chan.center_freq), status);
if (status != WLAN_STATUS_SUCCESS)
goto done;
@@ -181,7 +182,7 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
if (!cfg80211_chandef_valid(&chandef)) {
pr_warn("MAC%u.%u: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
vif->mac->macid, vif->vifid,
- chandef.chan->center_freq,
+ chandef.chan ? chandef.chan->center_freq : 0,
chandef.center_freq1,
chandef.center_freq2,
chandef.width);
@@ -617,6 +618,42 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif,
return ret;
}
+static int
+qtnf_event_handle_mic_failure(struct qtnf_vif *vif,
+ const struct qlink_event_mic_failure *mic_ev,
+ u16 len)
+{
+ struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+ u8 pairwise;
+
+ if (len < sizeof(*mic_ev)) {
+ pr_err("VIF%u.%u: payload is too short (%u < %zu)\n",
+ vif->mac->macid, vif->vifid, len,
+ sizeof(struct qlink_event_mic_failure));
+ return -EINVAL;
+ }
+
+ if (!wiphy->registered || !vif->netdev)
+ return 0;
+
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
+ pr_err("VIF%u.%u: MIC_FAILURE event when not in STA mode\n",
+ vif->mac->macid, vif->vifid);
+ return -EPROTO;
+ }
+
+ pairwise = mic_ev->pairwise ?
+ NL80211_KEYTYPE_PAIRWISE : NL80211_KEYTYPE_GROUP;
+
+ pr_info("%s: MIC error: src=%pM key_index=%u pairwise=%u\n",
+ vif->netdev->name, mic_ev->src, mic_ev->key_index, pairwise);
+
+ cfg80211_michael_mic_failure(vif->netdev, mic_ev->src, pairwise,
+ mic_ev->key_index, NULL, GFP_KERNEL);
+
+ return 0;
+}
+
static int qtnf_event_parse(struct qtnf_wmac *mac,
const struct sk_buff *event_skb)
{
@@ -679,6 +716,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac,
ret = qtnf_event_handle_external_auth(vif, (const void *)event,
event_len);
break;
+ case QLINK_EVENT_MIC_FAILURE:
+ ret = qtnf_event_handle_mic_failure(vif, (const void *)event,
+ event_len);
+ break;
default:
pr_warn("unknown event type: %x\n", event_id);
break;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index 8ae318b5fe54..5337e67092ca 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -33,7 +33,7 @@ static unsigned int tx_bd_size_param;
module_param(tx_bd_size_param, uint, 0644);
MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size");
-static unsigned int rx_bd_size_param = 256;
+static unsigned int rx_bd_size_param;
module_param(rx_bd_size_param, uint, 0644);
MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size");
@@ -130,6 +130,8 @@ static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus)
{
+ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+ char card_id[64];
int ret;
bus->fw_state = QTNF_FW_STATE_BOOT_DONE;
@@ -137,7 +139,9 @@ int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus)
if (ret) {
pr_err("failed to attach core\n");
} else {
- qtnf_debugfs_init(bus, DRV_NAME);
+ snprintf(card_id, sizeof(card_id), "%s:%s",
+ DRV_NAME, pci_name(priv->pdev));
+ qtnf_debugfs_init(bus, card_id);
qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);
@@ -337,7 +341,6 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
bus->fw_state = QTNF_FW_STATE_DETACHED;
pcie_priv->pdev = pdev;
pcie_priv->tx_stopped = 0;
- pcie_priv->rx_bd_num = rx_bd_size_param;
pcie_priv->flashboot = flashboot;
if (fw_blksize_param > QTN_PCIE_MAX_FW_BUFSZ)
@@ -354,7 +357,6 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pcie_priv->pcie_irq_count = 0;
pcie_priv->tx_reclaim_done = 0;
pcie_priv->tx_reclaim_req = 0;
- pcie_priv->tx_eapol = 0;
pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE");
if (!pcie_priv->workqueue) {
@@ -377,7 +379,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pcie_priv->epmem_bar = epmem_bar;
pci_save_state(pdev);
- ret = pcie_priv->probe_cb(bus, tx_bd_size_param);
+ ret = pcie_priv->probe_cb(bus, tx_bd_size_param, rx_bd_size_param);
if (ret)
goto error;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
index 5e8b9cb68419..2a6a928e13bd 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
@@ -23,7 +23,8 @@
struct qtnf_pcie_bus_priv {
struct pci_dev *pdev;
- int (*probe_cb)(struct qtnf_bus *bus, unsigned int tx_bd_size);
+ int (*probe_cb)(struct qtnf_bus *bus, unsigned int tx_bd_size,
+ unsigned int rx_bd_size);
void (*remove_cb)(struct qtnf_bus *bus);
int (*suspend_cb)(struct qtnf_bus *bus);
int (*resume_cb)(struct qtnf_bus *bus);
@@ -62,7 +63,6 @@ struct qtnf_pcie_bus_priv {
u32 tx_done_count;
u32 tx_reclaim_done;
u32 tx_reclaim_req;
- u32 tx_eapol;
u8 msi_enabled;
u8 tx_stopped;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 3aa3714d4dfd..a501a1fd5332 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -24,6 +24,7 @@
#include "debug.h"
#define PEARL_TX_BD_SIZE_DEFAULT 32
+#define PEARL_RX_BD_SIZE_DEFAULT 256
struct qtnf_pearl_bda {
__le16 bda_len;
@@ -244,8 +245,6 @@ static int pearl_alloc_bd_table(struct qtnf_pcie_pearl_state *ps)
/* tx bd */
- memset(vaddr, 0, len);
-
ps->bd_table_vaddr = vaddr;
ps->bd_table_paddr = paddr;
ps->bd_table_len = len;
@@ -399,7 +398,8 @@ static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps)
}
static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
- unsigned int tx_bd_size)
+ unsigned int tx_bd_size,
+ unsigned int rx_bd_size)
{
struct qtnf_pcie_bus_priv *priv = &ps->base;
int ret;
@@ -411,28 +411,29 @@ static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
val = tx_bd_size * sizeof(struct qtnf_pearl_tx_bd);
if (!is_power_of_2(tx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
- pr_warn("bad tx_bd_size value %u\n", tx_bd_size);
+ pr_warn("invalid tx_bd_size value %u, use default %u\n",
+ tx_bd_size, PEARL_TX_BD_SIZE_DEFAULT);
priv->tx_bd_num = PEARL_TX_BD_SIZE_DEFAULT;
} else {
priv->tx_bd_num = tx_bd_size;
}
- priv->rx_bd_w_index = 0;
- priv->rx_bd_r_index = 0;
+ if (rx_bd_size == 0)
+ rx_bd_size = PEARL_RX_BD_SIZE_DEFAULT;
- if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
- pr_err("rx_bd_size_param %u is not power of two\n",
- priv->rx_bd_num);
- return -EINVAL;
- }
+ val = rx_bd_size * sizeof(dma_addr_t);
- val = priv->rx_bd_num * sizeof(dma_addr_t);
- if (val > PCIE_HHBM_MAX_SIZE) {
- pr_err("rx_bd_size_param %u is too large\n",
- priv->rx_bd_num);
- return -EINVAL;
+ if (!is_power_of_2(rx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
+ pr_warn("invalid rx_bd_size value %u, use default %u\n",
+ rx_bd_size, PEARL_RX_BD_SIZE_DEFAULT);
+ priv->rx_bd_num = PEARL_RX_BD_SIZE_DEFAULT;
+ } else {
+ priv->rx_bd_num = rx_bd_size;
}
+ priv->rx_bd_w_index = 0;
+ priv->rx_bd_r_index = 0;
+
ret = pearl_hhbm_init(ps);
if (ret) {
pr_err("failed to init h/w queues\n");
@@ -1066,7 +1067,8 @@ static u64 qtnf_pearl_dma_mask_get(void)
#endif
}
-static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size)
+static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
+ unsigned int rx_bd_size)
{
struct qtnf_shm_ipc_int ipc_int;
struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
@@ -1081,7 +1083,7 @@ static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size)
ps->bda = ps->base.epmem_bar;
writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled);
- ret = qtnf_pcie_pearl_init_xfer(ps, tx_bd_size);
+ ret = qtnf_pcie_pearl_init_xfer(ps, tx_bd_size, rx_bd_size);
if (ret) {
pr_err("PCIE xfer init failed\n");
return ret;
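
For Pearl, rx_bd_size validation also changes policy: a bad module parameter used to fail the probe with -EINVAL, and now it warns and falls back to PEARL_RX_BD_SIZE_DEFAULT, mirroring the existing tx_bd_size handling; 0 (parameter left unset) selects the default silently. A runnable model of the policy (the queue cap below is a hypothetical stand-in for PCIE_HHBM_MAX_SIZE):

#include <stdio.h>

#define RX_BD_SIZE_DEFAULT 256
#define HHBM_MAX_SIZE	   (1024 * sizeof(void *))	/* hypothetical cap */

static unsigned int pick_rx_bd_num(unsigned int requested)
{
	if (requested == 0)
		return RX_BD_SIZE_DEFAULT;		/* param not set */
	if ((requested & (requested - 1)) != 0 ||	/* not a power of 2 */
	    requested * sizeof(void *) > HHBM_MAX_SIZE)
		return RX_BD_SIZE_DEFAULT;		/* warn, use default */
	return requested;
}

int main(void)
{
	printf("%u %u %u\n", pick_rx_bd_num(0), pick_rx_bd_num(300),
	       pick_rx_bd_num(512));			/* 256 256 512 */
	return 0;
}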
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index 9a4380ed7f1b..a0587472736f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -23,6 +23,7 @@
#include "debug.h"
#define TOPAZ_TX_BD_SIZE_DEFAULT 128
+#define TOPAZ_RX_BD_SIZE_DEFAULT 256
struct qtnf_topaz_tx_bd {
__le32 addr;
@@ -199,8 +200,6 @@ static int topaz_alloc_bd_table(struct qtnf_pcie_topaz_state *ts,
if (!vaddr)
return -ENOMEM;
- memset(vaddr, 0, len);
-
/* tx bd */
ts->tx_bd_vbase = vaddr;
@@ -333,7 +332,8 @@ static void qtnf_topaz_free_xfer_buffers(struct qtnf_pcie_topaz_state *ts)
}
static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts,
- unsigned int tx_bd_size)
+ unsigned int tx_bd_size,
+ unsigned int rx_bd_size)
{
struct qtnf_topaz_bda __iomem *bda = ts->bda;
struct qtnf_pcie_bus_priv *priv = &ts->base;
@@ -351,6 +351,17 @@ static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts,
priv->tx_bd_num = tx_bd_size;
qtnf_non_posted_write(priv->tx_bd_num, &bda->bda_rc_tx_bd_num);
+
+ if (rx_bd_size == 0)
+ rx_bd_size = TOPAZ_RX_BD_SIZE_DEFAULT;
+
+ if (rx_bd_size > TOPAZ_RX_BD_SIZE_DEFAULT) {
+ pr_warn("RX BD queue cannot exceed %d\n",
+ TOPAZ_RX_BD_SIZE_DEFAULT);
+ rx_bd_size = TOPAZ_RX_BD_SIZE_DEFAULT;
+ }
+
+ priv->rx_bd_num = rx_bd_size;
qtnf_non_posted_write(priv->rx_bd_num, &bda->bda_rc_rx_bd_num);
priv->rx_bd_w_index = 0;
@@ -498,13 +509,6 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
int len;
int i;
- if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
- qtnf_packet_send_hi_pri(skb);
- qtnf_update_tx_stats(skb->dev, skb);
- priv->tx_eapol++;
- return NETDEV_TX_OK;
- }
-
spin_lock_irqsave(&priv->tx_lock, flags);
if (!qtnf_tx_queue_ready(ts)) {
@@ -768,7 +772,6 @@ static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
- seq_printf(s, "tx_eapol(%u)\n", priv->tx_eapol);
seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
seq_printf(s, "tx_done_index(%u)\n", tx_done_index);
@@ -1113,7 +1116,8 @@ static u64 qtnf_topaz_dma_mask_get(void)
return DMA_BIT_MASK(32);
}
-static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus, unsigned int tx_bd_num)
+static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus,
+ unsigned int tx_bd_num, unsigned int rx_bd_num)
{
struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
struct pci_dev *pdev = ts->base.pdev;
@@ -1147,7 +1151,7 @@ static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus, unsigned int tx_bd_num)
return ret;
}
- ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num);
+ ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num, rx_bd_num);
if (ret) {
pr_err("PCIE xfer init failed\n");
return ret;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 8a3c6344fa8e..59c69c0a6e06 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -217,6 +217,8 @@ struct qlink_sta_info_state {
* command is supported only if device reports QLINK_HW_SUPPORTS_REG_UPDATE
* capability.
* @QLINK_CMD_START_CAC: start radar detection procedure on a specified channel.
+ * @QLINK_CMD_TXPWR: get or set current channel transmit power for
+ * the specified MAC.
*/
enum qlink_cmd_type {
QLINK_CMD_FW_INIT = 0x0001,
@@ -254,6 +256,7 @@ enum qlink_cmd_type {
QLINK_CMD_PM_SET = 0x0062,
QLINK_CMD_WOWLAN_SET = 0x0063,
QLINK_CMD_EXTERNAL_AUTH = 0x0066,
+ QLINK_CMD_TXPWR = 0x0067,
};
/**
@@ -719,6 +722,32 @@ struct qlink_cmd_pm_set {
} __packed;
/**
+ * enum qlink_txpwr_op - transmit power operation type
+ * @QLINK_TXPWR_SET: set tx power
+ * @QLINK_TXPWR_GET: get current tx power setting
+ */
+enum qlink_txpwr_op {
+ QLINK_TXPWR_SET,
+ QLINK_TXPWR_GET
+};
+
+/**
+ * struct qlink_cmd_txpwr - get or set current transmit power
+ *
+ * @txpwr: new transmit power setting, in mBm
+ * @txpwr_setting: transmit power setting type, one of
+ * &enum nl80211_tx_power_setting
+ * @op_type: type of operation, one of &enum qlink_txpwr_op
+ */
+struct qlink_cmd_txpwr {
+ struct qlink_cmd chdr;
+ __le32 txpwr;
+ u8 txpwr_setting;
+ u8 op_type;
+ u8 rsvd[2];
+} __packed;
+
+/**
* enum qlink_wowlan_trigger
*
* @QLINK_WOWLAN_TRIG_DISCONNECT: wakeup on disconnect
@@ -944,6 +973,19 @@ struct qlink_resp_channel_get {
struct qlink_chandef chan;
} __packed;
+/**
+ * struct qlink_resp_txpwr - response for QLINK_CMD_TXPWR command
+ *
+ * This response is intended for QLINK_TXPWR_GET operation and does not
+ * contain any meaningful information in case of QLINK_TXPWR_SET operation.
+ *
+ * @txpwr: current transmit power setting, in mBm
+ */
+struct qlink_resp_txpwr {
+ struct qlink_resp rhdr;
+ __le32 txpwr;
+} __packed;
+
/* QLINK Events messages related definitions
*/
@@ -958,6 +1000,7 @@ enum qlink_event_type {
QLINK_EVENT_FREQ_CHANGE = 0x0028,
QLINK_EVENT_RADAR = 0x0029,
QLINK_EVENT_EXTERNAL_AUTH = 0x0030,
+ QLINK_EVENT_MIC_FAILURE = 0x0031,
};
/**
@@ -1151,6 +1194,20 @@ struct qlink_event_external_auth {
u8 action;
} __packed;
+/**
+ * struct qlink_event_mic_failure - data for QLINK_EVENT_MIC_FAILURE event
+ *
+ * @src: source MAC address of the frame
+ * @key_index: index of the key being reported
+ * @pairwise: whether the key is pairwise or group
+ */
+struct qlink_event_mic_failure {
+ struct qlink_event ehdr;
+ u8 src[ETH_ALEN];
+ u8 key_index;
+ u8 pairwise;
+} __packed;
+
/* QLINK TLVs (Type-Length Values) definitions
*/
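
For reference, the units on the new wire format: cfg80211 hands .set_tx_power an mBm value (hundredths of a dBm), which this protocol carries unchanged and converts back with MBM_TO_DBM only on the GET side. Filling the command, sketched against the struct above (an already-allocated cmd_skb is assumed):

	struct qlink_cmd_txpwr *cmd = (struct qlink_cmd_txpwr *)cmd_skb->data;

	cmd->op_type = QLINK_TXPWR_SET;
	cmd->txpwr_setting = NL80211_TX_POWER_LIMITED;	/* cfg80211 enum */
	cmd->txpwr = cpu_to_le32(2000);			/* 2000 mBm == 20 dBm */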
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 25466454b73e..a36c3fea7495 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -5839,8 +5839,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_TXBF_CFG_0, 0x8000fc21);
rt2800_register_write(rt2x00dev, TX_TXBF_CFG_3, 0x00009c40);
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392) ||
- rt2x00_rt(rt2x00dev, RT6352)) {
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -5854,8 +5853,6 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
- rt2800_register_write(rt2x00dev, MIMO_PS_CFG, 0x00000002);
- rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x00150F0F);
rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000);
rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0);
rt2800_register_write(rt2x00dev, TX1_BB_GAIN_ATTEN, 0x0);
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
index 23cd4ff78e54..e1bf41c278a5 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
@@ -37,53 +37,11 @@ static const u8 cck_ofdm_gain_settings[] = {
0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
};
-static const u8 rtl8225se_tx_gain_cck_ofdm[] = {
- 0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e
-};
-
-static const u8 rtl8225se_tx_power_cck[] = {
- 0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02,
- 0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02,
- 0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02,
- 0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02,
- 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03,
- 0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03
-};
-
-static const u8 rtl8225se_tx_power_cck_ch14[] = {
- 0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00,
- 0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00,
- 0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00,
- 0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00,
- 0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00,
- 0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00
-};
-
-static const u8 rtl8225se_tx_power_ofdm[] = {
- 0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4
-};
-
static const u32 rtl8225se_chan[] = {
0x0080, 0x0100, 0x0180, 0x0200, 0x0280, 0x0300, 0x0380,
0x0400, 0x0480, 0x0500, 0x0580, 0x0600, 0x0680, 0x074A,
};
-static const u8 rtl8225sez2_tx_power_cck_ch14[] = {
- 0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00
-};
-
-static const u8 rtl8225sez2_tx_power_cck_B[] = {
- 0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x04
-};
-
-static const u8 rtl8225sez2_tx_power_cck_A[] = {
- 0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04
-};
-
-static const u8 rtl8225sez2_tx_power_cck[] = {
- 0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04
-};
-
static const u8 ZEBRA_AGC[] = {
0x7E, 0x7E, 0x7E, 0x7E, 0x7D, 0x7C, 0x7B, 0x7A,
0x79, 0x78, 0x77, 0x76, 0x75, 0x74, 0x73, 0x72,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index ade057d868f7..6598c8d786ea 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1187,6 +1187,79 @@ struct rtl8723bu_c2h {
struct rtl8xxxu_fileops;
+/* MLME related */
+enum wireless_mode {
+ WIRELESS_MODE_UNKNOWN = 0,
+ /* Sub-Element */
+ WIRELESS_MODE_B = BIT(0),
+ WIRELESS_MODE_G = BIT(1),
+ WIRELESS_MODE_A = BIT(2),
+ WIRELESS_MODE_N_24G = BIT(3),
+ WIRELESS_MODE_N_5G = BIT(4),
+ WIRELESS_AUTO = BIT(5),
+ WIRELESS_MODE_AC = BIT(6),
+ WIRELESS_MODE_MAX = 0x7F,
+};
+
+/* from rtlwifi/wifi.h */
+enum ratr_table_mode_new {
+ RATEID_IDX_BGN_40M_2SS = 0,
+ RATEID_IDX_BGN_40M_1SS = 1,
+ RATEID_IDX_BGN_20M_2SS_BN = 2,
+ RATEID_IDX_BGN_20M_1SS_BN = 3,
+ RATEID_IDX_GN_N2SS = 4,
+ RATEID_IDX_GN_N1SS = 5,
+ RATEID_IDX_BG = 6,
+ RATEID_IDX_G = 7,
+ RATEID_IDX_B = 8,
+ RATEID_IDX_VHT_2SS = 9,
+ RATEID_IDX_VHT_1SS = 10,
+ RATEID_IDX_MIX1 = 11,
+ RATEID_IDX_MIX2 = 12,
+ RATEID_IDX_VHT_3SS = 13,
+ RATEID_IDX_BGN_3SS = 14,
+};
+
+#define BT_INFO_8723B_1ANT_B_FTP BIT(7)
+#define BT_INFO_8723B_1ANT_B_A2DP BIT(6)
+#define BT_INFO_8723B_1ANT_B_HID BIT(5)
+#define BT_INFO_8723B_1ANT_B_SCO_BUSY BIT(4)
+#define BT_INFO_8723B_1ANT_B_ACL_BUSY BIT(3)
+#define BT_INFO_8723B_1ANT_B_INQ_PAGE BIT(2)
+#define BT_INFO_8723B_1ANT_B_SCO_ESCO BIT(1)
+#define BT_INFO_8723B_1ANT_B_CONNECTION BIT(0)
+
+enum _BT_8723B_1ANT_STATUS {
+ BT_8723B_1ANT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8723B_1ANT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723B_1ANT_STATUS_INQ_PAGE = 0x2,
+ BT_8723B_1ANT_STATUS_ACL_BUSY = 0x3,
+ BT_8723B_1ANT_STATUS_SCO_BUSY = 0x4,
+ BT_8723B_1ANT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8723B_1ANT_STATUS_MAX
+};
+
+struct rtl8xxxu_btcoex {
+ u8 bt_status;
+ bool bt_busy;
+ bool has_sco;
+ bool has_a2dp;
+ bool has_hid;
+ bool has_pan;
+ bool hid_only;
+ bool a2dp_only;
+ bool c2h_bt_inquiry;
+};
+
+#define RTL8XXXU_RATR_STA_INIT 0
+#define RTL8XXXU_RATR_STA_HIGH 1
+#define RTL8XXXU_RATR_STA_MID 2
+#define RTL8XXXU_RATR_STA_LOW 3
+
+#define RTL8XXXU_NOISE_FLOOR_MIN -100
+#define RTL8XXXU_SNR_THRESH_HIGH 50
+#define RTL8XXXU_SNR_THRESH_LOW 20
+
struct rtl8xxxu_priv {
struct ieee80211_hw *hw;
struct usb_device *udev;
@@ -1291,6 +1364,17 @@ struct rtl8xxxu_priv {
u8 pi_enabled:1;
u8 no_pape:1;
u8 int_buf[USB_INTR_CONTENT_LENGTH];
+ u8 rssi_level;
+ /*
+ * Only one virtual interface permitted because only STA mode
+ * is supported and no iface_combinations are provided.
+ */
+ struct ieee80211_vif *vif;
+ struct delayed_work ra_watchdog;
+ struct work_struct c2hcmd_work;
+ struct sk_buff_head c2hcmd_queue;
+ spinlock_t c2hcmd_lock;
+ struct rtl8xxxu_btcoex bt_coex;
};
struct rtl8xxxu_rx_urb {
@@ -1326,7 +1410,7 @@ struct rtl8xxxu_fileops {
void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel,
bool ht40);
void (*update_rate_mask) (struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi);
+ u32 ramask, u8 rateid, int sgi);
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void (*fill_txdesc) (struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
@@ -1341,6 +1425,7 @@ struct rtl8xxxu_fileops {
u8 has_s0s1:1;
u8 has_tx_report:1;
u8 gen2_thermal_meter:1;
+ u8 needs_full_init:1;
u32 adda_1t_init;
u32 adda_1t_path_on;
u32 adda_2t_path_on_a;
@@ -1411,9 +1496,9 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw);
void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen2_usb_quirks(struct rtl8xxxu_priv *priv);
void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi);
+ u32 ramask, u8 rateid, int sgi);
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi);
+ u32 ramask, u8 rateid, int sgi);
void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
@@ -1437,6 +1522,8 @@ void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi,
bool short_preamble, bool ampdu_enable,
u32 rts_rate);
+void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
+ u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5);
extern struct rtl8xxxu_fileops rtl8192cu_fops;
extern struct rtl8xxxu_fileops rtl8192eu_fops;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index c747f6a1922d..9f1f93d04145 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1011,7 +1011,7 @@ static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
u32 i, val32;
int path_a_ok, path_b_ok;
int retry = 2;
- const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
@@ -1021,11 +1021,11 @@ static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
REG_RX_TO_RX, REG_STANDBY,
REG_SLEEP, REG_PMPD_ANAEN
};
- const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
REG_TXPAUSE, REG_BEACON_CTRL,
REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
};
- const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index ceffe05bd65b..a71e1816e632 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -882,7 +882,7 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
u32 i, val32;
int path_a_ok /*, path_b_ok */;
int retry = 2;
- const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
@@ -892,11 +892,11 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
REG_RX_TO_RX, REG_STANDBY,
REG_SLEEP, REG_PMPD_ANAEN
};
- const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
REG_TXPAUSE, REG_BEACON_CTRL,
REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
};
- const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
@@ -1580,9 +1580,7 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
/*
* Software control, antenna at WiFi side
*/
-#ifdef NEED_PS_TDMA
rtl8723bu_set_ps_tdma(priv, 0x08, 0x00, 0x00, 0x00, 0x00);
-#endif
rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x55555555);
@@ -1670,6 +1668,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.has_s0s1 = 1,
.has_tx_report = 1,
.gen2_thermal_meter = 1,
+ .needs_full_init = 1,
.adda_1t_init = 0x01c00014,
.adda_1t_path_on = 0x01c00014,
.adda_2t_path_on_a = 0x01c00014,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index c6c41fb962ff..1d94cab6f71f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1255,7 +1255,7 @@ void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw)
void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
- u32 val32, rsr;
+ u32 val32;
u8 val8, subchannel;
u16 rf_mode_bw;
bool ht = true;
@@ -1264,7 +1264,6 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw)
rf_mode_bw = rtl8xxxu_read16(priv, REG_WMAC_TRXPTCL_CTL);
rf_mode_bw &= ~WMAC_TRXPTCL_CTL_BW_MASK;
- rsr = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
channel = hw->conf.chandef.chan->hw_value;
/* Hack */
@@ -3115,7 +3114,7 @@ static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
u32 i, val32;
int path_a_ok, path_b_ok;
int retry = 2;
- const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
@@ -3125,11 +3124,11 @@ static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
REG_RX_TO_RX, REG_STANDBY,
REG_SLEEP, REG_PMPD_ANAEN
};
- const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
REG_TXPAUSE, REG_BEACON_CTRL,
REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
};
- const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
@@ -3820,9 +3819,8 @@ void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e);
}
-#ifdef NEED_PS_TDMA
-static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
- u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5)
+void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
+ u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5)
{
struct h2c_cmd h2c;
@@ -3835,7 +3833,6 @@ static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
h2c.b_type_dma.data5 = arg5;
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.b_type_dma));
}
-#endif
void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv)
{
@@ -3902,6 +3899,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
else
macpower = true;
+ if (fops->needs_full_init)
+ macpower = false;
+
ret = fops->power_on(priv);
if (ret < 0) {
dev_warn(dev, "%s: Failed power on\n", __func__);
@@ -4304,7 +4304,8 @@ static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
}
-void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv, u32 ramask, int sgi)
+void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
+ u32 ramask, u8 rateid, int sgi)
{
struct h2c_cmd h2c;
@@ -4324,7 +4325,7 @@ void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv, u32 ramask, int sgi)
}
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi)
+ u32 ramask, u8 rateid, int sgi)
{
struct h2c_cmd h2c;
u8 bw = 0;
@@ -4338,7 +4339,7 @@ void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
h2c.b_macid_cfg.ramask3 = (ramask >> 24) & 0xff;
h2c.ramask.arg = 0x80;
- h2c.b_macid_cfg.data1 = 0;
+ h2c.b_macid_cfg.data1 = rateid;
if (sgi)
h2c.b_macid_cfg.data1 |= BIT(7);
@@ -4478,6 +4479,35 @@ static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
rtl8xxxu_write8(priv, REG_INIRTS_RATE_SEL, rate_idx);
}
+static u16
+rtl8xxxu_wireless_mode(struct ieee80211_hw *hw, struct ieee80211_sta *sta)
+{
+ u16 network_type = WIRELESS_MODE_UNKNOWN;
+
+ if (hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) {
+ if (sta->vht_cap.vht_supported)
+ network_type = WIRELESS_MODE_AC;
+ else if (sta->ht_cap.ht_supported)
+ network_type = WIRELESS_MODE_N_5G;
+
+ network_type |= WIRELESS_MODE_A;
+ } else {
+ if (sta->vht_cap.vht_supported)
+ network_type = WIRELESS_MODE_AC;
+ else if (sta->ht_cap.ht_supported)
+ network_type = WIRELESS_MODE_N_24G;
+
+ if (sta->supp_rates[0] <= 0xf)
+ network_type |= WIRELESS_MODE_B;
+ else if (sta->supp_rates[0] & 0xf)
+ network_type |= (WIRELESS_MODE_B | WIRELESS_MODE_G);
+ else
+ network_type |= WIRELESS_MODE_G;
+ }
+
+ return network_type;
+}
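/*
 * Editor's illustration, not part of the patch: tracing the helper above
 * for a hypothetical 2.4 GHz HT station whose supp_rates[0] has both CCK
 * and OFDM bits set (e.g. 0xff0f, which is > 0xf and has & 0xf != 0):
 *
 *	network_type = WIRELESS_MODE_N_24G;
 *	network_type |= (WIRELESS_MODE_B | WIRELESS_MODE_G);
 *
 * i.e. BIT(0) | BIT(1) | BIT(3), the combination that
 * rtl8xxxu_refresh_rate_mask() later maps to one of the RATEID_IDX_BGN_*
 * rate table indices.
 */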
+
static void
rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u32 changed)
@@ -4520,7 +4550,10 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
sgi = 1;
rcu_read_unlock();
- priv->fops->update_rate_mask(priv, ramask, sgi);
+ priv->vif = vif;
+ priv->rssi_level = RTL8XXXU_RATR_STA_INIT;
+
+ priv->fops->update_rate_mask(priv, ramask, 0, sgi);
rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
@@ -5148,12 +5181,269 @@ static void rtl8xxxu_rx_urb_work(struct work_struct *work)
}
}
+/*
+ * The RTL8723BU/RTL8192EU vendor drivers use coexistence table types
+ * 0-7 to represent writing different combinations of register values
+ * to the REG_BT_COEX_TABLEs. These cover different coexistence use
+ * cases for which Realtek does not provide details. Keep this aligned
+ * with the vendor driver for easier maintenance.
+ */
+static
+void rtl8723bu_set_coex_with_type(struct rtl8xxxu_priv *priv, u8 type)
+{
+ switch (type) {
+ case 0:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 1:
+ case 3:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 2:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 4:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0xaaaa5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 5:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0xaa5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 6:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0xaaaaaaaa);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 7:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0xaaaaaaaa);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0xaaaaaaaa);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ default:
+ break;
+ }
+}
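/*
 * Editor's sketch, not part of the patch: since REG_BT_COEX_TABLE3 and
 * REG_BT_COEX_TABLE4 take the same value (0x00ffffff / 0x03) in every
 * case, the switch above could also be expressed as a lookup table.
 * Values are copied verbatim from the cases above.
 */
static const u32 bt_coex_table_vals[8][2] = {
	[0] = { 0x55555555, 0x55555555 },
	[1] = { 0x55555555, 0x5a5a5a5a },
	[2] = { 0x5a5a5a5a, 0x5a5a5a5a },
	[3] = { 0x55555555, 0x5a5a5a5a },
	[4] = { 0x5a5a5a5a, 0xaaaa5a5a },
	[5] = { 0x5a5a5a5a, 0xaa5a5a5a },
	[6] = { 0x55555555, 0xaaaaaaaa },
	[7] = { 0xaaaaaaaa, 0xaaaaaaaa },
};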
+
+static
+void rtl8723bu_update_bt_link_info(struct rtl8xxxu_priv *priv, u8 bt_info)
+{
+ struct rtl8xxxu_btcoex *btcoex = &priv->bt_coex;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_INQ_PAGE)
+ btcoex->c2h_bt_inquiry = true;
+ else
+ btcoex->c2h_bt_inquiry = false;
+
+ if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_NON_CONNECTED_IDLE;
+ btcoex->has_sco = false;
+ btcoex->has_hid = false;
+ btcoex->has_pan = false;
+ btcoex->has_a2dp = false;
+ } else {
+ if ((bt_info & 0x1f) == BT_INFO_8723B_1ANT_B_CONNECTION)
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_CONNECTED_IDLE;
+ else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
+ (bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY))
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_SCO_BUSY;
+ else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY)
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_ACL_BUSY;
+ else
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_MAX;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_FTP)
+ btcoex->has_pan = true;
+ else
+ btcoex->has_pan = false;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_A2DP)
+ btcoex->has_a2dp = true;
+ else
+ btcoex->has_a2dp = false;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_HID)
+ btcoex->has_hid = true;
+ else
+ btcoex->has_hid = false;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO)
+ btcoex->has_sco = true;
+ else
+ btcoex->has_sco = false;
+ }
+
+ if (!btcoex->has_a2dp && !btcoex->has_sco &&
+ !btcoex->has_pan && btcoex->has_hid)
+ btcoex->hid_only = true;
+ else
+ btcoex->hid_only = false;
+
+ if (!btcoex->has_sco && !btcoex->has_pan &&
+ !btcoex->has_hid && btcoex->has_a2dp)
+ btcoex->a2dp_only = true;
+ else
+ btcoex->a2dp_only = false;
+
+ if (btcoex->bt_status == BT_8723B_1ANT_STATUS_SCO_BUSY ||
+ btcoex->bt_status == BT_8723B_1ANT_STATUS_ACL_BUSY)
+ btcoex->bt_busy = true;
+ else
+ btcoex->bt_busy = false;
+}
+
+static
+void rtl8723bu_handle_bt_inquiry(struct rtl8xxxu_priv *priv)
+{
+ struct ieee80211_vif *vif;
+ struct rtl8xxxu_btcoex *btcoex;
+ bool wifi_connected;
+
+ vif = priv->vif;
+ btcoex = &priv->bt_coex;
+ wifi_connected = (vif && vif->bss_conf.assoc);
+
+ if (!wifi_connected) {
+ rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
+ rtl8723bu_set_coex_with_type(priv, 0);
+ } else if (btcoex->has_sco || btcoex->has_hid || btcoex->has_a2dp) {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x35, 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ } else if (btcoex->has_pan) {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x3f, 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ } else {
+ rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
+ rtl8723bu_set_coex_with_type(priv, 7);
+ }
+}
+
+static
+void rtl8723bu_handle_bt_info(struct rtl8xxxu_priv *priv)
+{
+ struct ieee80211_vif *vif;
+ struct rtl8xxxu_btcoex *btcoex;
+ bool wifi_connected;
+
+ vif = priv->vif;
+ btcoex = &priv->bt_coex;
+ wifi_connected = (vif && vif->bss_conf.assoc);
+
+ if (wifi_connected) {
+ u32 val32 = 0;
+ u32 high_prio_tx = 0, high_prio_rx = 0;
+
+ val32 = rtl8xxxu_read32(priv, 0x770);
+ high_prio_tx = val32 & 0x0000ffff;
+ high_prio_rx = (val32 & 0xffff0000) >> 16;
+
+ if (btcoex->bt_busy) {
+ if (btcoex->hid_only) {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x20,
+ 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 5);
+ } else if (btcoex->a2dp_only) {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x35,
+ 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ } else if ((btcoex->has_a2dp && btcoex->has_pan) ||
+ (btcoex->has_hid && btcoex->has_a2dp &&
+ btcoex->has_pan)) {
+ rtl8723bu_set_ps_tdma(priv, 0x51, 0x21,
+ 0x3, 0x10, 0x10);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ } else if (btcoex->has_hid && btcoex->has_a2dp) {
+ rtl8723bu_set_ps_tdma(priv, 0x51, 0x21,
+ 0x3, 0x10, 0x10);
+ rtl8723bu_set_coex_with_type(priv, 3);
+ } else {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x35,
+ 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ }
+ } else {
+ rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
+ if (high_prio_tx + high_prio_rx <= 60)
+ rtl8723bu_set_coex_with_type(priv, 2);
+ else
+ rtl8723bu_set_coex_with_type(priv, 7);
+ }
+ } else {
+ rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
+ rtl8723bu_set_coex_with_type(priv, 0);
+ }
+}
+
+static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
+{
+ struct rtl8xxxu_priv *priv;
+ struct rtl8723bu_c2h *c2h;
+ struct ieee80211_vif *vif;
+ struct device *dev;
+ struct sk_buff *skb = NULL;
+ unsigned long flags;
+ int len;
+ u8 bt_info = 0;
+ struct rtl8xxxu_btcoex *btcoex;
+
+ priv = container_of(work, struct rtl8xxxu_priv, c2hcmd_work);
+ vif = priv->vif;
+ btcoex = &priv->bt_coex;
+ dev = &priv->udev->dev;
+
+ if (priv->rf_paths > 1)
+ return;
+
+ while (!skb_queue_empty(&priv->c2hcmd_queue)) {
+ spin_lock_irqsave(&priv->c2hcmd_lock, flags);
+ skb = __skb_dequeue(&priv->c2hcmd_queue);
+ spin_unlock_irqrestore(&priv->c2hcmd_lock, flags);
+
+ c2h = (struct rtl8723bu_c2h *)skb->data;
+ len = skb->len - 2;
+
+ switch (c2h->id) {
+ case C2H_8723B_BT_INFO:
+ bt_info = c2h->bt_info.bt_info;
+
+ rtl8723bu_update_bt_link_info(priv, bt_info);
+ if (btcoex->c2h_bt_inquiry) {
+ rtl8723bu_handle_bt_inquiry(priv);
+ break;
+ }
+ rtl8723bu_handle_bt_info(priv);
+ break;
+ default:
+ break;
+ }
+
+ /* free each dequeued event, not just the last one */
+ dev_kfree_skb(skb);
+ }
+}
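/*
 * Editor's note: rtl8723bu_handle_c2h() below queues C2H skbs from URB
 * completion (atomic) context, and the work item above drains them in
 * process context; that is presumably why the handoff goes through a
 * spinlock-protected sk_buff queue instead of handling BT_INFO inline.
 */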
+
static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
struct sk_buff *skb)
{
struct rtl8723bu_c2h *c2h = (struct rtl8723bu_c2h *)skb->data;
struct device *dev = &priv->udev->dev;
int len;
+ unsigned long flags;
len = skb->len - 2;
@@ -5191,6 +5481,12 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
16, 1, c2h->raw.payload, len, false);
break;
}
+
+ spin_lock_irqsave(&priv->c2hcmd_lock, flags);
+ __skb_queue_tail(&priv->c2hcmd_queue, skb);
+ spin_unlock_irqrestore(&priv->c2hcmd_lock, flags);
+
+ schedule_work(&priv->c2hcmd_work);
}
int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
@@ -5315,7 +5611,6 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
struct device *dev = &priv->udev->dev;
dev_dbg(dev, "%s: C2H packet\n", __func__);
rtl8723bu_handle_c2h(priv, skb);
- dev_kfree_skb(skb);
return RX_TYPE_C2H;
}
@@ -5444,6 +5739,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
usb_unanchor_urb(urb);
+ usb_free_urb(urb);
goto error;
}
@@ -5464,6 +5760,10 @@ static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
+ if (!priv->vif)
+ priv->vif = vif;
+ else
+ return -EOPNOTSUPP;
rtl8xxxu_stop_tx_beacon(priv);
val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
@@ -5487,6 +5787,9 @@ static void rtl8xxxu_remove_interface(struct ieee80211_hw *hw,
struct rtl8xxxu_priv *priv = hw->priv;
dev_dbg(&priv->udev->dev, "%s\n", __func__);
+
+ if (priv->vif)
+ priv->vif = NULL;
}
static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
@@ -5772,6 +6075,178 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return 0;
}
+static u8 rtl8xxxu_signal_to_snr(int signal)
+{
+ if (signal < RTL8XXXU_NOISE_FLOOR_MIN)
+ signal = RTL8XXXU_NOISE_FLOOR_MIN;
+ else if (signal > 0)
+ signal = 0;
+ return (u8)(signal - RTL8XXXU_NOISE_FLOOR_MIN);
+}
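/*
 * Worked example (editor's illustration): a signal of -60 dBm lies within
 * [RTL8XXXU_NOISE_FLOOR_MIN, 0], so snr = -60 - (-100) = 40, which falls
 * between RTL8XXXU_SNR_THRESH_LOW (20) and RTL8XXXU_SNR_THRESH_HIGH (50)
 * and is classified as RTL8XXXU_RATR_STA_MID below.
 */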
+
+static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
+ int signal, struct ieee80211_sta *sta)
+{
+ struct ieee80211_hw *hw = priv->hw;
+ u16 wireless_mode;
+ u8 rssi_level, ratr_idx;
+ u8 txbw_40mhz;
+ u8 snr, snr_thresh_high, snr_thresh_low;
+ u8 go_up_gap = 5;
+
+ rssi_level = priv->rssi_level;
+ snr = rtl8xxxu_signal_to_snr(signal);
+ snr_thresh_high = RTL8XXXU_SNR_THRESH_HIGH;
+ snr_thresh_low = RTL8XXXU_SNR_THRESH_LOW;
+ txbw_40mhz = (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40) ? 1 : 0;
+
+ switch (rssi_level) {
+ case RTL8XXXU_RATR_STA_MID:
+ snr_thresh_high += go_up_gap;
+ break;
+ case RTL8XXXU_RATR_STA_LOW:
+ snr_thresh_high += go_up_gap;
+ snr_thresh_low += go_up_gap;
+ break;
+ default:
+ break;
+ }
+
+ if (snr > snr_thresh_high)
+ rssi_level = RTL8XXXU_RATR_STA_HIGH;
+ else if (snr > snr_thresh_low)
+ rssi_level = RTL8XXXU_RATR_STA_MID;
+ else
+ rssi_level = RTL8XXXU_RATR_STA_LOW;
+
+ if (rssi_level != priv->rssi_level) {
+ int sgi = 0;
+ u32 rate_bitmap = 0;
+
+ rcu_read_lock();
+ rate_bitmap = (sta->supp_rates[0] & 0xfff) |
+ (sta->ht_cap.mcs.rx_mask[0] << 12) |
+ (sta->ht_cap.mcs.rx_mask[1] << 20);
+ if (sta->ht_cap.cap &
+ (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
+ sgi = 1;
+ rcu_read_unlock();
+
+ wireless_mode = rtl8xxxu_wireless_mode(hw, sta);
+ switch (wireless_mode) {
+ case WIRELESS_MODE_B:
+ ratr_idx = RATEID_IDX_B;
+ if (rate_bitmap & 0x0000000c)
+ rate_bitmap &= 0x0000000d;
+ else
+ rate_bitmap &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_A:
+ case WIRELESS_MODE_G:
+ ratr_idx = RATEID_IDX_G;
+ if (rssi_level == RTL8XXXU_RATR_STA_HIGH)
+ rate_bitmap &= 0x00000f00;
+ else
+ rate_bitmap &= 0x00000ff0;
+ break;
+ case (WIRELESS_MODE_B | WIRELESS_MODE_G):
+ ratr_idx = RATEID_IDX_BG;
+ if (rssi_level == RTL8XXXU_RATR_STA_HIGH)
+ rate_bitmap &= 0x00000f00;
+ else if (rssi_level == RTL8XXXU_RATR_STA_MID)
+ rate_bitmap &= 0x00000ff0;
+ else
+ rate_bitmap &= 0x00000ff5;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ case (WIRELESS_MODE_G | WIRELESS_MODE_N_24G):
+ case (WIRELESS_MODE_A | WIRELESS_MODE_N_5G):
+ if (priv->tx_paths == 2 && priv->rx_paths == 2)
+ ratr_idx = RATEID_IDX_GN_N2SS;
+ else
+ ratr_idx = RATEID_IDX_GN_N1SS;
+ break;
+ case (WIRELESS_MODE_B | WIRELESS_MODE_G | WIRELESS_MODE_N_24G):
+ case (WIRELESS_MODE_B | WIRELESS_MODE_N_24G):
+ if (txbw_40mhz) {
+ if (priv->tx_paths == 2 && priv->rx_paths == 2)
+ ratr_idx = RATEID_IDX_BGN_40M_2SS;
+ else
+ ratr_idx = RATEID_IDX_BGN_40M_1SS;
+ } else {
+ if (priv->tx_paths == 2 && priv->rx_paths == 2)
+ ratr_idx = RATEID_IDX_BGN_20M_2SS_BN;
+ else
+ ratr_idx = RATEID_IDX_BGN_20M_1SS_BN;
+ }
+
+ if (priv->tx_paths == 2 && priv->rx_paths == 2) {
+ if (rssi_level == RTL8XXXU_RATR_STA_HIGH) {
+ rate_bitmap &= 0x0f8f0000;
+ } else if (rssi_level == RTL8XXXU_RATR_STA_MID) {
+ rate_bitmap &= 0x0f8ff000;
+ } else {
+ if (txbw_40mhz)
+ rate_bitmap &= 0x0f8ff015;
+ else
+ rate_bitmap &= 0x0f8ff005;
+ }
+ } else {
+ if (rssi_level == RTL8XXXU_RATR_STA_HIGH) {
+ rate_bitmap &= 0x000f0000;
+ } else if (rssi_level == RTL8XXXU_RATR_STA_MID) {
+ rate_bitmap &= 0x000ff000;
+ } else {
+ if (txbw_40mhz)
+ rate_bitmap &= 0x000ff015;
+ else
+ rate_bitmap &= 0x000ff005;
+ }
+ }
+ break;
+ default:
+ ratr_idx = RATEID_IDX_BGN_40M_2SS;
+ rate_bitmap &= 0x0fffffff;
+ break;
+ }
+
+ priv->rssi_level = rssi_level;
+ priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi);
+ }
+}
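/*
 * Editor's note on the hysteresis above: when the current level is MID the
 * promotion threshold rises by go_up_gap (5), and at LOW both thresholds
 * rise by 5, so the SNR must improve by an extra 5 before the station is
 * promoted; this keeps the rate mask from oscillating around a threshold.
 */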
+
+static void rtl8xxxu_watchdog_callback(struct work_struct *work)
+{
+ struct ieee80211_vif *vif;
+ struct rtl8xxxu_priv *priv;
+
+ priv = container_of(work, struct rtl8xxxu_priv, ra_watchdog.work);
+ vif = priv->vif;
+
+ if (vif && vif->type == NL80211_IFTYPE_STATION) {
+ int signal;
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!sta) {
+ struct device *dev = &priv->udev->dev;
+
+ dev_dbg(dev, "%s: no sta found\n", __func__);
+ rcu_read_unlock();
+ goto out;
+ }
+ rcu_read_unlock();
+
+ signal = ieee80211_ave_rssi(vif);
+ rtl8xxxu_refresh_rate_mask(priv, signal, sta);
+ }
+
+out:
+ schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
+}
+
static int rtl8xxxu_start(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
@@ -5828,6 +6303,8 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
}
+
+ schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
exit:
/*
* Accept all data and mgmt frames
@@ -5879,6 +6356,8 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
if (priv->usb_interrupts)
rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+ cancel_delayed_work_sync(&priv->ra_watchdog);
+
rtl8xxxu_free_rx_resources(priv);
rtl8xxxu_free_tx_resources(priv);
}
@@ -6001,7 +6480,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
}
break;
case 0x7392:
- if (id->idProduct == 0x7811)
+ if (id->idProduct == 0x7811 || id->idProduct == 0xa611)
untested = 0;
break;
case 0x050d:
@@ -6051,6 +6530,10 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
INIT_LIST_HEAD(&priv->rx_urb_pending_list);
spin_lock_init(&priv->rx_urb_lock);
INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
+ INIT_DELAYED_WORK(&priv->ra_watchdog, rtl8xxxu_watchdog_callback);
+ spin_lock_init(&priv->c2hcmd_lock);
+ INIT_WORK(&priv->c2hcmd_work, rtl8xxxu_c2hcmd_callback);
+ skb_queue_head_init(&priv->c2hcmd_queue);
usb_set_intfdata(interface, hw);
@@ -6205,6 +6688,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192eu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0xb720, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8723bu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa611, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8723bu_fops},
#ifdef CONFIG_RTL8XXXU_UNTESTED
/* Still supported by rtlwifi */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 3ebc7c93b1e9..3c96c320236c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -1578,10 +1578,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
static int up, dn, m, n, wait_cnt;
- /* 0: no change, +1: increase WiFi duration,
- * -1: decrease WiFi duration
- */
- int result;
u8 retry_cnt = 0;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -1669,7 +1665,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
m = 1;
n = 3;
- result = 0;
wait_cnt = 0;
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
@@ -1679,7 +1674,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
up, dn, m, n, wait_cnt);
- result = 0;
wait_cnt++;
/* no retry in the last 2-second duration */
if (retry_cnt == 0) {
@@ -1694,7 +1688,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
n = 3;
up = 0;
dn = 0;
- result = 1;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex]Increase wifi duration!!\n");
}
@@ -1718,7 +1711,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
wait_cnt = 0;
- result = -1;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Reduce wifi duration for retry<3\n");
}
@@ -1735,7 +1727,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
wait_cnt = 0;
- result = -1;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Decrease wifi duration for retryCounter>3!!\n");
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 5f5739904edf..528e442f25a4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -1424,17 +1424,11 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
* -1: decrease WiFi duration
*/
s32 result;
- u8 retry_count = 0, bt_info_ext;
- bool wifi_busy = false;
+ u8 retry_count = 0;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], TdmaDurationAdjustForAcl()\n");
- if (wifi_status == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY)
- wifi_busy = true;
- else
- wifi_busy = false;
-
if ((wifi_status ==
BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN) ||
(wifi_status == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN) ||
@@ -1472,7 +1466,6 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
retry_count = coex_sta->bt_retry_cnt;
- bt_info_ext = coex_sta->bt_info_ext;
if ((coex_sta->low_priority_tx) > 1050 ||
(coex_sta->low_priority_rx) > 1250)
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index 264667203f6f..cef9f2a9303b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -915,7 +915,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct pgpkt_struct target_pkt;
u8 write_state = PG_STATE_HEADER;
- int continual = true, dataempty = true, result = true;
+ int continual = true, result = true;
u16 efuse_addr = 0;
u8 efuse_data;
u8 target_word_cnts = 0;
@@ -942,7 +942,6 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
while (continual && (efuse_addr < (EFUSE_MAX_SIZE -
rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) {
if (write_state == PG_STATE_HEADER) {
- dataempty = true;
badworden = 0x0F;
RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
"efuse PG_STATE_HEADER\n");
@@ -1179,13 +1178,12 @@ static u16 efuse_get_current_size(struct ieee80211_hw *hw)
{
int continual = true;
u16 efuse_addr = 0;
- u8 hoffset, hworden;
+ u8 hworden;
u8 efuse_data, word_cnts;
while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
(efuse_addr < EFUSE_MAX_SIZE)) {
if (efuse_data != 0xFF) {
- hoffset = (efuse_data >> 4) & 0x0F;
hworden = efuse_data & 0x0F;
word_cnts = efuse_calculate_word_cnts(hworden);
efuse_addr = efuse_addr + (word_cnts * 2) + 1;
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index fff8dda14023..e5e1ec5a41dc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -68,7 +68,6 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- enum rf_pwrstate rtstate;
bool actionallowed = false;
u16 rfwait_cnt = 0;
@@ -102,8 +101,6 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
}
}
- rtstate = ppsc->rfpwr_state;
-
switch (state_toset) {
case ERFON:
ppsc->rfoff_reason &= (~changesource);
@@ -161,8 +158,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
if (ppsc->inactive_pwrstate == ERFON &&
rtlhal->interface == INTF_PCI) {
if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
- RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) &&
- rtlhal->interface == INTF_PCI) {
+ RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
rtlpriv->intf_ops->disable_aspm(hw);
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index c10432cd703e..8be31e0ad878 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -386,7 +386,7 @@ int rtl_regd_init(struct ieee80211_hw *hw,
struct wiphy *wiphy = hw->wiphy;
struct country_code_to_enum_rd *country = NULL;
- if (wiphy == NULL || &rtlpriv->regd == NULL)
+ if (!wiphy)
return -EINVAL;
/* init country_code from efuse channel plan */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index 333e355c9281..dceb04a9b3f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -759,14 +759,8 @@ static void rtl88e_dm_pwdb_monitor(struct ieee80211_hw *hw)
rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
}
/* Indicate Rx signal strength to FW. */
- if (rtlpriv->dm.useramask) {
- u8 h2c_parameter[3] = { 0 };
-
- h2c_parameter[2] = (u8)(rtlpriv->dm.undec_sm_pwdb & 0xFF);
- h2c_parameter[0] = 0x20;
- } else {
+ if (!rtlpriv->dm.useramask)
rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undec_sm_pwdb);
- }
}
void rtl88e_dm_init_edca_turbo(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index 96d8f25b120f..978e6a169654 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -649,7 +649,7 @@ static bool phy_config_bb_with_pghdr(struct ieee80211_hw *hw, u8 configtype)
int i;
u32 *phy_reg_page;
u16 phy_reg_page_len;
- u32 v1 = 0, v2 = 0, v3 = 0;
+ u32 v1 = 0, v2 = 0;
phy_reg_page_len = RTL8188EEPHY_REG_ARRAY_PGLEN;
phy_reg_page = RTL8188EEPHY_REG_ARRAY_PG;
@@ -658,7 +658,6 @@ static bool phy_config_bb_with_pghdr(struct ieee80211_hw *hw, u8 configtype)
for (i = 0; i < phy_reg_page_len; i = i + 3) {
v1 = phy_reg_page[i];
v2 = phy_reg_page[i+1];
- v3 = phy_reg_page[i+2];
if (v1 < 0xcdcdcdcd) {
if (phy_reg_page[i] == 0xfe)
@@ -689,13 +688,11 @@ static bool phy_config_bb_with_pghdr(struct ieee80211_hw *hw, u8 configtype)
v1 = phy_reg_page[i];
v2 = phy_reg_page[i+1];
- v3 = phy_reg_page[i+2];
while (v2 != 0xDEAD &&
i < phy_reg_page_len - 5) {
i += 3;
v1 = phy_reg_page[i];
v2 = phy_reg_page[i+1];
- v3 = phy_reg_page[i+2];
}
}
}
@@ -769,7 +766,6 @@ bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- bool rtstatus = true;
u32 *radioa_array_table;
u16 radioa_arraylen;
@@ -778,7 +774,6 @@ bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Radio_A:RTL8188EE_RADIOA_1TARRAY %d\n", radioa_arraylen);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
- rtstatus = true;
switch (rfpath) {
case RF90_PATH_A:
process_path_a(hw, radioa_arraylen, radioa_array_table);
@@ -1940,9 +1935,9 @@ void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
struct rtl_phy *rtlphy = &rtlpriv->phy;
long result[4][8];
u8 i, final_candidate;
- bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
- reg_ecc, reg_tmp = 0;
+ bool b_patha_ok;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc,
+ reg_tmp = 0;
bool is12simular, is13simular, is23simular;
u32 iqk_bb_reg[9] = {
ROFDM0_XARXIQIMBALANCE,
@@ -1971,7 +1966,6 @@ void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
}
final_candidate = 0xff;
b_patha_ok = false;
- b_pathb_ok = false;
is12simular = false;
is23simular = false;
is13simular = false;
@@ -2014,27 +2008,20 @@ void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
- reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
reg_e94 = result[final_candidate][0];
reg_e9c = result[final_candidate][1];
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
reg_eb4 = result[final_candidate][4];
reg_ebc = result[final_candidate][5];
- reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
rtlphy->reg_eb4 = reg_eb4;
rtlphy->reg_ebc = reg_ebc;
rtlphy->reg_e94 = reg_e94;
rtlphy->reg_e9c = reg_e9c;
b_patha_ok = true;
- b_pathb_ok = true;
} else {
rtlphy->reg_e94 = 0x100;
rtlphy->reg_eb4 = 0x100;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index f2908ee5f860..4bef237f488d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -1649,8 +1649,6 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
(rtlpriv->btcoexist.bt_rssi_state &
BT_RSSI_STATE_SPECIAL_LOW)) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
- } else if (rtlpriv->btcoexist.bt_service == BT_PAN) {
- rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
} else {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
index 0efd19aa4fe5..661249d618c0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
@@ -1369,8 +1369,8 @@ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
long result[4][8];
u8 i, final_candidate;
bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
- reg_ecc, reg_tmp = 0;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_ec4,
+ reg_tmp = 0;
bool is12simular, is13simular, is23simular;
u32 iqk_bb_reg[10] = {
ROFDM0_XARXIQIMBALANCE,
@@ -1445,21 +1445,17 @@ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
b_patha_ok = true;
b_pathb_ok = true;
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 56cc3bc30860..f070f25bb735 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -1540,6 +1540,8 @@ static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
* This may be necessary:
* rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb);
*/
+ dev_kfree_skb(skb);
+
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index c7f29a9be50d..146fe144f5f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1176,6 +1176,7 @@ void rtl92de_enable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+ rtlpci->irq_enabled = true;
}
void rtl92de_disable_interrupt(struct ieee80211_hw *hw)
@@ -1185,7 +1186,7 @@ void rtl92de_disable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
- synchronize_irq(rtlpci->pdev->irq);
+ rtlpci->irq_enabled = false;
}
static void _rtl92de_poweroff_adapter(struct ieee80211_hw *hw)
@@ -1351,7 +1352,7 @@ void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw)
bcn_interval = mac->beacon_interval;
atim_window = 2;
- /*rtl92de_disable_interrupt(hw); */
+ rtl92de_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
@@ -1371,9 +1372,9 @@ void rtl92de_set_beacon_interval(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
"beacon_interval:%d\n", bcn_interval);
- /* rtl92de_disable_interrupt(hw); */
+ rtl92de_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
- /* rtl92de_enable_interrupt(hw); */
+ rtl92de_enable_interrupt(hw);
}
void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 99e5cd9a5c86..1dbdddce0823 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -216,6 +216,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
.led_control = rtl92de_led_control,
.set_desc = rtl92de_set_desc,
.get_desc = rtl92de_get_desc,
+ .is_tx_desc_closed = rtl92de_is_tx_desc_closed,
.tx_polling = rtl92de_tx_polling,
.enable_hw_sec = rtl92de_enable_hw_security_config,
.set_key = rtl92de_set_key,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 2494e1f118f8..92c9fb45f800 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -804,13 +804,15 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw,
break;
}
} else {
- struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_DESC_OWN(pdesc);
+ ret = GET_RX_DESC_OWN(p_desc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_DESC_PKT_LEN(pdesc);
+ ret = GET_RX_DESC_PKT_LEN(p_desc);
+ break;
+ case HW_DESC_RXBUFF_ADDR:
+ ret = GET_RX_DESC_BUFF_ADDR(p_desc);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
@@ -821,6 +823,23 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw,
return ret;
}
+bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+ u8 *entry = (u8 *)(&ring->desc[ring->idx]);
+ u8 own = (u8)rtl92de_get_desc(hw, entry, true, HW_DESC_OWN);
+
+ /* a beacon packet will only use the first
+ * descriptor by default, and the own bit may not
+ * be cleared by the hardware
+ */
+ if (own)
+ return false;
+ return true;
+}
+
void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
index 36820070fd76..635989e15282 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
@@ -715,6 +715,8 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
u8 desc_name, u8 *val);
u64 rtl92de_get_desc(struct ieee80211_hw *hw,
u8 *p_desc, bool istx, u8 desc_name);
+bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
bool b_firstseg, bool b_lastseg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 67305ce915ec..05462422d247 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -108,7 +108,6 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
struct rtlwifi_firmware_header *pfwheader;
u8 *pfwdata;
u32 fwsize;
- int err;
enum version_8192e version = rtlhal->version;
if (!rtlhal->pfirmware)
@@ -146,9 +145,7 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
_rtl92ee_write_fw(hw, version, pfwdata, fwsize);
_rtl92ee_enable_fw_download(hw, false);
- err = _rtl92ee_fw_free_to_go(hw);
-
- return 0;
+ return _rtl92ee_fw_free_to_go(hw);
}
static bool _rtl92ee_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index 222abc41669c..f03de8bdd2d1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -2801,8 +2801,8 @@ void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
long result[4][8];
u8 i, final_candidate;
bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac;
- long reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ long reg_e94, reg_e9c, reg_ea4;
+ long reg_eb4, reg_ebc, reg_ec4;
bool is12simular, is13simular, is23simular;
u8 idx;
u32 iqk_bb_reg[IQK_BB_REG_NUM] = {
@@ -2873,11 +2873,9 @@ void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
@@ -2886,13 +2884,11 @@ void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e9c = result[final_candidate][1];
rtlphy->reg_e9c = reg_e9c;
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
reg_eb4 = result[final_candidate][4];
rtlphy->reg_eb4 = reg_eb4;
reg_ebc = result[final_candidate][5];
rtlphy->reg_ebc = reg_ebc;
reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
b_patha_ok = true;
b_pathb_ok = true;
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 27f1a631b569..dc7b515bdc85 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -651,7 +651,6 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb);
u16 seq_number;
__le16 fc = hdr->frame_control;
- unsigned int buf_len;
u8 fw_qsel = _rtl92ee_map_hwqueue_to_fwqueue(skb, hw_queue);
bool firstseg = ((hdr->seq_ctrl &
cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
@@ -659,7 +658,6 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
dma_addr_t mapping;
u8 bw_40 = 0;
- u8 short_gi;
__le32 *pdesc = (__le32 *)pdesc8;
if (mac->opmode == NL80211_IFTYPE_STATION) {
@@ -677,7 +675,6 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
skb_push(skb, EM_HDR_LEN);
memset(skb->data, 0, EM_HDR_LEN);
}
- buf_len = skb->len;
mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
@@ -724,11 +721,6 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
}
}
- if (ptcb_desc->hw_rate > DESC_RATEMCS0)
- short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
- else
- short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
-
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
set_tx_desc_agg_enable(pdesc, 1);
set_tx_desc_max_agg_num(pdesc, 0x14);
@@ -1010,14 +1002,13 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
{
- u16 cur_tx_rp, cur_tx_wp;
+ u16 cur_tx_rp;
u32 tmpu32;
tmpu32 =
rtl_read_dword(rtlpriv,
get_desc_addr_fr_q_idx(hw_queue));
cur_tx_rp = (u16)((tmpu32 >> 16) & 0x0fff);
- cur_tx_wp = (u16)(tmpu32 & 0x0fff);
/* don't need to update ring->cur_tx_wp */
ring->cur_tx_rp = cur_tx_rp;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
index bb6b60814762..f43331224851 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
@@ -24,192 +24,186 @@
#define TX_DESC_SIZE_RTL8192S (16 * 4)
#define TX_CMDDESC_SIZE_RTL8192S (16 * 4)
-/* Define a macro that takes a le32 word, converts it to host ordering,
- * right shifts by a specified count, creates a mask of the specified
- * bit count, and extracts that number of bits.
- */
-
-#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
- ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
- BIT_LEN_MASK_32(__mask))
-
-/* Define a macro that clears a bit field in an le32 word and
- * sets the specified value into that bit field. The resulting
- * value remains in le32 ordering; however, it is properly converted
- * to host ordering for the clear and set operations before conversion
- * back to le32.
- */
-
-#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
- (*(__le32 *)(__pdesc) = \
- (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
- (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
- (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
-
/* macros to read/write various fields in RX or TX descriptors */
/* Dword 0 */
-#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
-#define SET_TX_DESC_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
-#define SET_TX_DESC_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 24, 2, __val)
-#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
-#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
-#define SET_TX_DESC_LINIP(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
-#define SET_TX_DESC_AMSDU(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
-#define SET_TX_DESC_GREEN_FIELD(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_TX_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_TX_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
+static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_tx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
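/*
 * Editor's note, illustrative only: the deleted
 * SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) maps onto the
 * bitfield.h helpers as
 *
 *	le32p_replace_bits(pdesc, val, GENMASK(shift + len - 1, shift));
 *
 * e.g. the old SET_TX_DESC_OFFSET used shift 16, len 8, which becomes
 * GENMASK(23, 16) in set_tx_desc_offset() above.
 */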
/* Dword 1 */
-#define SET_TX_DESC_MACID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 0, 5, __val)
-#define SET_TX_DESC_MORE_DATA(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 5, 1, __val)
-#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 6, 1, __val)
-#define SET_TX_DESC_PIFS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 7, 1, __val)
-#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 8, 5, __val)
-#define SET_TX_DESC_ACK_POLICY(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 13, 2, __val)
-#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 15, 1, __val)
-#define SET_TX_DESC_NON_QOS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 16, 1, __val)
-#define SET_TX_DESC_KEY_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 17, 2, __val)
-#define SET_TX_DESC_OUI(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 19, 1, __val)
-#define SET_TX_DESC_PKT_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 20, 1, __val)
-#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 21, 1, __val)
-#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 22, 2, __val)
-#define SET_TX_DESC_WDS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 24, 1, __val)
-#define SET_TX_DESC_HTC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 25, 1, __val)
-#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 26, 5, __val)
-#define SET_TX_DESC_HWPC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 27, 1, __val)
+static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_non_qos(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(16));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22));
+}
/* Dword 2 */
-#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 0, 6, __val)
-#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 6, 1, __val)
-#define SET_TX_DESC_TSFL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 7, 5, __val)
-#define SET_TX_DESC_RTS_RETRY_COUNT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 12, 6, __val)
-#define SET_TX_DESC_DATA_RETRY_COUNT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 18, 6, __val)
-#define SET_TX_DESC_RSVD_MACID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(((__pdesc) + 8), 24, 5, __val)
-#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 29, 1, __val)
-#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 30, 1, __val)
-#define SET_TX_DESC_OWN_MAC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 31, 1, __val)
+static inline void set_tx_desc_rsvd_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, GENMASK(28, 24));
+}
+
+static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, BIT(29));
+}
/* Dword 3 */
-#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 0, 8, __val)
-#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 8, 8, __val)
-#define SET_TX_DESC_SEQ(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 16, 12, __val)
-#define SET_TX_DESC_FRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 28, 4, __val)
+static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16));
+}
/* Dword 4 */
-#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 0, 6, __val)
-#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 6, 1, __val)
-#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 7, 4, __val)
-#define SET_TX_DESC_CTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 11, 1, __val)
-#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 12, 1, __val)
-#define SET_TX_DESC_RA_BRSR_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 13, 3, __val)
-#define SET_TX_DESC_TXHT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 16, 1, __val)
-#define SET_TX_DESC_TX_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 17, 1, __val)
-#define SET_TX_DESC_TX_BANDWIDTH(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 18, 1, __val)
-#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 19, 2, __val)
-#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 21, 2, __val)
-#define SET_TX_DESC_TX_REVERSE_DIRECTION(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 23, 1, __val)
-#define SET_TX_DESC_RTS_HT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 24, 1, __val)
-#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 25, 1, __val)
-#define SET_TX_DESC_RTS_BANDWIDTH(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 26, 1, __val)
-#define SET_TX_DESC_RTS_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 27, 2, __val)
-#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 29, 2, __val)
-#define SET_TX_DESC_USER_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 31, 1, __val)
+static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_cts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(12));
+}
+
+static inline void set_tx_desc_ra_brsr_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(15, 13));
+}
+
+static inline void set_tx_desc_txht(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(16));
+}
+
+static inline void set_tx_desc_tx_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(17));
+}
+
+static inline void set_tx_desc_tx_bandwidth(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(18));
+}
+
+static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(20, 19));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(25));
+}
+
+static inline void set_tx_desc_rts_bandwidth(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(26));
+}
+
+static inline void set_tx_desc_rts_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(28, 27));
+}
+
+static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(30, 29));
+}
+
+static inline void set_tx_desc_user_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(31));
+}
/* Dword 5 */
-#define SET_TX_DESC_PACKET_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 0, 9, __val)
-#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 9, 6, __val)
-#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 15, 1, __val)
-#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 16, 5, __val)
-#define SET_TX_DESC_TX_AGC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 21, 11, __val)
-
-/* Dword 6 */
-#define SET_TX_DESC_IP_CHECK_SUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 24, 0, 16, __val)
-#define SET_TX_DESC_TCP_CHECK_SUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 24, 16, 16, __val)
+static inline void set_tx_desc_packet_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(8, 0));
+}
+
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(14, 9));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(20, 16));
+}
/* Dword 7 */
-#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 28, 0, 16, __val)
-#define SET_TX_DESC_IP_HEADER_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 28, 16, 8, __val)
-#define SET_TX_DESC_TCP_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 28, 31, 1, __val)
+static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0));
+}
/* Dword 8 */
-#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 32, 0, 32, __val)
-#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 32, 0, 32)
+static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 8) = cpu_to_le32(__val);
+}
+
+static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc)
+{
+ return le32_to_cpu(*((__pdesc + 8)));
+}
/* Dword 9 */
-#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 36, 0, 32, __val)
+static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 9) = cpu_to_le32(__val);
+}
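
/* Aside, not part of the patch: the conversion rule behind every
 * accessor above.  The old macros addressed the descriptor as a byte
 * buffer; the new helpers index it as __le32 words, so byte offset 16
 * becomes word index 4, and an (offset, length) bit pair folds into
 * GENMASK(offset + length - 1, offset).  A minimal sketch using the
 * <linux/bitfield.h> helper:
 */
#include <linux/bitfield.h>

static inline void set_rts_sub_carrier_sketch(__le32 *desc, u32 val)
{
	/* old form: SET_BITS_OFFSET_LE(desc_bytes + 16, 27, 2, val) */
	le32p_replace_bits(desc + 4, val, GENMASK(28, 27));
}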
/* Because the PCI Tx descriptors are chained at the
* initialization and all the NextDescAddresses in
@@ -226,208 +220,115 @@
#define RX_DRV_INFO_SIZE_UNIT 8
/* DWORD 0 */
-#define SET_RX_STATUS_DESC_PKT_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
-#define SET_RX_STATUS_DESC_CRC32(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 14, 1, __val)
-#define SET_RX_STATUS_DESC_ICV(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 15, 1, __val)
-#define SET_RX_STATUS_DESC_DRVINFO_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 16, 4, __val)
-#define SET_RX_STATUS_DESC_SECURITY(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 20, 3, __val)
-#define SET_RX_STATUS_DESC_QOS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 23, 1, __val)
-#define SET_RX_STATUS_DESC_SHIFT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 24, 2, __val)
-#define SET_RX_STATUS_DESC_PHY_STATUS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
-#define SET_RX_STATUS_DESC_SWDEC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
-#define SET_RX_STATUS_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
-#define SET_RX_STATUS_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
-#define SET_RX_STATUS_DESC_EOR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_RX_STATUS_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_RX_STATUS_DESC_PKT_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 0, 14)
-#define GET_RX_STATUS_DESC_CRC32(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 14, 1)
-#define GET_RX_STATUS_DESC_ICV(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 15, 1)
-#define GET_RX_STATUS_DESC_DRVINFO_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 16, 4)
-#define GET_RX_STATUS_DESC_SECURITY(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 20, 3)
-#define GET_RX_STATUS_DESC_QOS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 23, 1)
-#define GET_RX_STATUS_DESC_SHIFT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 24, 2)
-#define GET_RX_STATUS_DESC_PHY_STATUS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 26, 1)
-#define GET_RX_STATUS_DESC_SWDEC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 27, 1)
-#define GET_RX_STATUS_DESC_LAST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 28, 1)
-#define GET_RX_STATUS_DESC_FIRST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 29, 1)
-#define GET_RX_STATUS_DESC_EOR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 30, 1)
-#define GET_RX_STATUS_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
+static inline void set_rx_status_desc_pkt_len(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
+}
+
+static inline void set_rx_status_desc_eor(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(30));
+}
+
+static inline void set_rx_status_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_rx_status_desc_pkt_len(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(13, 0));
+}
+
+static inline u32 get_rx_status_desc_crc32(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(14));
+}
+
+static inline u32 get_rx_status_desc_icv(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(15));
+}
+
+static inline u32 get_rx_status_desc_drvinfo_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(19, 16));
+}
+
+static inline u32 get_rx_status_desc_shift(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(25, 24));
+}
+
+static inline u32 get_rx_status_desc_phy_status(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(26));
+}
+
+static inline u32 get_rx_status_desc_swdec(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(27));
+}
+
+static inline u32 get_rx_status_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
/* DWORD 1 */
-#define SET_RX_STATUS_DESC_MACID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 0, 5, __val)
-#define SET_RX_STATUS_DESC_TID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 5, 4, __val)
-#define SET_RX_STATUS_DESC_PAGGR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 14, 1, __val)
-#define SET_RX_STATUS_DESC_FAGGR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 15, 1, __val)
-#define SET_RX_STATUS_DESC_A1_FIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 16, 4, __val)
-#define SET_RX_STATUS_DESC_A2_FIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 20, 4, __val)
-#define SET_RX_STATUS_DESC_PAM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 24, 1, __val)
-#define SET_RX_STATUS_DESC_PWR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 25, 1, __val)
-#define SET_RX_STATUS_DESC_MOREDATA(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 26, 1, __val)
-#define SET_RX_STATUS_DESC_MOREFRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 27, 1, __val)
-#define SET_RX_STATUS_DESC_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 28, 2, __val)
-#define SET_RX_STATUS_DESC_MC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 30, 1, __val)
-#define SET_RX_STATUS_DESC_BC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 31, 1, __val)
-
-#define GET_RX_STATUS_DEC_MACID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 0, 5)
-#define GET_RX_STATUS_DESC_TID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 5, 4)
-#define GET_RX_STATUS_DESC_PAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 14, 1)
-#define GET_RX_STATUS_DESC_FAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 15, 1)
-#define GET_RX_STATUS_DESC_A1_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 16, 4)
-#define GET_RX_STATUS_DESC_A2_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 20, 4)
-#define GET_RX_STATUS_DESC_PAM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 24, 1)
-#define GET_RX_STATUS_DESC_PWR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 25, 1)
-#define GET_RX_STATUS_DESC_MORE_DATA(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 26, 1)
-#define GET_RX_STATUS_DESC_MORE_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 27, 1)
-#define GET_RX_STATUS_DESC_TYPE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 28, 2)
-#define GET_RX_STATUS_DESC_MC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 30, 1)
-#define GET_RX_STATUS_DESC_BC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 31, 1)
-
-/* DWORD 2 */
-#define SET_RX_STATUS_DESC_SEQ(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 0, 12, __val)
-#define SET_RX_STATUS_DESC_FRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 12, 4, __val)
-#define SET_RX_STATUS_DESC_NEXT_PKTLEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 16, 8, __val)
-#define SET_RX_STATUS_DESC_NEXT_IND(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 30, 1, __val)
-
-#define GET_RX_STATUS_DESC_SEQ(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 8, 0, 12)
-#define GET_RX_STATUS_DESC_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 8, 12, 4)
-#define GET_RX_STATUS_DESC_NEXT_PKTLEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 8, 16, 8)
-#define GET_RX_STATUS_DESC_NEXT_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 8, 30, 1)
+static inline u32 get_rx_status_desc_paggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(14));
+}
+
+static inline u32 get_rx_status_desc_faggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(15));
+}
/* DWORD 3 */
-#define SET_RX_STATUS_DESC_RX_MCS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 0, 6, __val)
-#define SET_RX_STATUS_DESC_RX_HT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 6, 1, __val)
-#define SET_RX_STATUS_DESC_AMSDU(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 7, 1, __val)
-#define SET_RX_STATUS_DESC_SPLCP(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 8, 1, __val)
-#define SET_RX_STATUS_DESC_BW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 9, 1, __val)
-#define SET_RX_STATUS_DESC_HTC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 10, 1, __val)
-#define SET_RX_STATUS_DESC_TCP_CHK_RPT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 11, 1, __val)
-#define SET_RX_STATUS_DESC_IP_CHK_RPT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 12, 1, __val)
-#define SET_RX_STATUS_DESC_TCP_CHK_VALID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 13, 1, __val)
-#define SET_RX_STATUS_DESC_HWPC_ERR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 14, 1, __val)
-#define SET_RX_STATUS_DESC_HWPC_IND(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 15, 1, __val)
-#define SET_RX_STATUS_DESC_IV0(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 16, 16, __val)
-
-#define GET_RX_STATUS_DESC_RX_MCS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 0, 6)
-#define GET_RX_STATUS_DESC_RX_HT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 6, 1)
-#define GET_RX_STATUS_DESC_AMSDU(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 7, 1)
-#define GET_RX_STATUS_DESC_SPLCP(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 8, 1)
-#define GET_RX_STATUS_DESC_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 9, 1)
-#define GET_RX_STATUS_DESC_HTC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 10, 1)
-#define GET_RX_STATUS_DESC_TCP_CHK_RPT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 11, 1)
-#define GET_RX_STATUS_DESC_IP_CHK_RPT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 12, 1)
-#define GET_RX_STATUS_DESC_TCP_CHK_VALID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 13, 1)
-#define GET_RX_STATUS_DESC_HWPC_ERR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 14, 1)
-#define GET_RX_STATUS_DESC_HWPC_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 15, 1)
-#define GET_RX_STATUS_DESC_IV0(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 16, 16)
-
-/* DWORD 4 */
-#define SET_RX_STATUS_DESC_IV1(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 0, 32, __val)
-#define GET_RX_STATUS_DESC_IV1(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 16, 0, 32)
+static inline u32 get_rx_status_desc_rx_mcs(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0));
+}
+
+static inline u32 get_rx_status_desc_rx_ht(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(6));
+}
+
+static inline u32 get_rx_status_desc_splcp(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(8));
+}
+
+static inline u32 get_rx_status_desc_bw(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(9));
+}
/* DWORD 5 */
-#define SET_RX_STATUS_DESC_TSFL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 0, 32, __val)
-#define GET_RX_STATUS_DESC_TSFL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 20, 0, 32)
+static inline u32 get_rx_status_desc_tsfl(__le32 *__pdesc)
+{
+ return le32_to_cpu(*((__pdesc + 5)));
+}
/* DWORD 6 */
-#define SET_RX_STATUS__DESC_BUFF_ADDR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 24, 0, 32, __val)
-#define GET_RX_STATUS_DESC_BUFF_ADDR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32)
+static inline void set_rx_status__desc_buff_addr(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 6) = cpu_to_le32(__val);
+}
+
+static inline u32 get_rx_status_desc_buff_addr(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 6));
+}
#define SE_RX_HAL_IS_CCK_RATE(_pdesc)\
- (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE1M || \
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE2M || \
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE5_5M ||\
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE11M)
+ (get_rx_status_desc_rx_mcs(_pdesc) == DESC_RATE1M || \
+ get_rx_status_desc_rx_mcs(_pdesc) == DESC_RATE2M || \
+ get_rx_status_desc_rx_mcs(_pdesc) == DESC_RATE5_5M ||\
+ get_rx_status_desc_rx_mcs(_pdesc) == DESC_RATE11M)
enum rf_optype {
RF_OP_BY_SW_3WIRE = 0,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
index 541b7881735e..47a5b95ca2b9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
@@ -442,17 +442,20 @@ static u32 _rtl92s_fill_h2c_cmd(struct sk_buff *skb, u32 h2cbufferlen,
memset((ph2c_buffer + totallen + tx_desclen), 0, len);
/* CMD len */
- SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
- 0, 16, pcmd_len[i]);
+ le32p_replace_bits((__le32 *)(ph2c_buffer + totallen +
+ tx_desclen), pcmd_len[i],
+ GENMASK(15, 0));
/* CMD ID */
- SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
- 16, 8, pelement_id[i]);
+ le32p_replace_bits((__le32 *)(ph2c_buffer + totallen +
+ tx_desclen), pelement_id[i],
+ GENMASK(23, 16));
/* CMD Sequence */
*cmd_start_seq = *cmd_start_seq % 0x80;
- SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
- 24, 7, *cmd_start_seq);
+ le32p_replace_bits((__le32 *)(ph2c_buffer + totallen +
+ tx_desclen), *cmd_start_seq,
+ GENMASK(30, 24));
++*cmd_start_seq;
/* Copy memory */
@@ -462,8 +465,9 @@ static u32 _rtl92s_fill_h2c_cmd(struct sk_buff *skb, u32 h2cbufferlen,
/* CMD continue */
/* set the continue bit in the previous cmd. */
if (i < cmd_num - 1)
- SET_BITS_TO_LE_4BYTE((ph2c_buffer + pre_continueoffset),
- 31, 1, 1);
+ le32p_replace_bits((__le32 *)(ph2c_buffer +
+ pre_continueoffset),
+ 1, BIT(31));
pre_continueoffset = totallen;
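
/* Aside, not part of the patch: the three le32p_replace_bits() calls
 * above pack one little-endian H2C header word (len, id, seq).  Since
 * the header is zeroed by the memset above, the same word could be
 * built in one shot with FIELD_PREP; a sketch, with field names that
 * are mine rather than the driver's:
 */
#include <linux/bitfield.h>

#define H2C_SKETCH_CMD_LEN	GENMASK(15, 0)
#define H2C_SKETCH_CMD_ID	GENMASK(23, 16)
#define H2C_SKETCH_CMD_SEQ	GENMASK(30, 24)

static void h2c_pack_header_sketch(__le32 *hdr, u16 len, u8 id, u8 seq)
{
	/* seq is kept to 7 bits, mirroring the "% 0x80" above */
	*hdr = cpu_to_le32(FIELD_PREP(H2C_SKETCH_CMD_LEN, len) |
			   FIELD_PREP(H2C_SKETCH_CMD_ID, id) |
			   FIELD_PREP(H2C_SKETCH_CMD_SEQ, seq & 0x7f));
}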
@@ -559,8 +563,8 @@ void rtl92s_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
pwrmode.flag_dps_en = 0;
pwrmode.bcn_rx_en = 0;
pwrmode.bcn_to = 0;
- SET_BITS_TO_LE_2BYTE((u8 *)(&pwrmode) + 8, 0, 16,
- mac->vif->bss_conf.beacon_int);
+ le16p_replace_bits((__le16 *)(((u8 *)(&pwrmode) + 8)),
+ mac->vif->bss_conf.beacon_int, GENMASK(15, 0));
pwrmode.app_itv = 0;
pwrmode.awake_bcn_itvl = ppsc->reg_max_lps_awakeintvl;
pwrmode.smart_ps = 1;
@@ -602,9 +606,10 @@ void rtl92s_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw,
joinbss_rpt.bssid[3] = mac->bssid[3];
joinbss_rpt.bssid[4] = mac->bssid[4];
joinbss_rpt.bssid[5] = mac->bssid[5];
- SET_BITS_TO_LE_2BYTE((u8 *)(&joinbss_rpt) + 8, 0, 16,
- mac->vif->bss_conf.beacon_int);
- SET_BITS_TO_LE_2BYTE((u8 *)(&joinbss_rpt) + 10, 0, 16, mac->assoc_id);
+ le16p_replace_bits((__le16 *)(((u8 *)(&joinbss_rpt) + 8)),
+ mac->vif->bss_conf.beacon_int, GENMASK(15, 0));
+ le16p_replace_bits((__le16 *)(((u8 *)(&joinbss_rpt) + 10)),
+ mac->assoc_id, GENMASK(15, 0));
_rtl92s_firmware_set_h2c_cmd(hw, FW_H2C_JOINBSSRPT, (u8 *)&joinbss_rpt);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index efb432c6d785..9eaa5348b556 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -33,7 +33,7 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
}
static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
- struct rtl_stats *pstats, u8 *pdesc,
+ struct rtl_stats *pstats, __le32 *pdesc,
struct rx_fwinfo *p_drvinfo,
bool packet_match_bssid,
bool packet_toself,
@@ -193,11 +193,10 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct sk_buff *skb, struct rtl_stats *pstats,
- u8 *pdesc, struct rx_fwinfo *p_drvinfo)
+ __le32 *pdesc, struct rx_fwinfo *p_drvinfo)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-
struct ieee80211_hdr *hdr;
u8 *tmp_buf;
u8 *praddr;
@@ -232,29 +231,30 @@ static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw,
}
bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
- struct ieee80211_rx_status *rx_status, u8 *pdesc,
+ struct ieee80211_rx_status *rx_status, u8 *pdesc8,
struct sk_buff *skb)
{
struct rx_fwinfo *p_drvinfo;
- u32 phystatus = (u32)GET_RX_STATUS_DESC_PHY_STATUS(pdesc);
+ __le32 *pdesc = (__le32 *)pdesc8;
+ u32 phystatus = (u32)get_rx_status_desc_phy_status(pdesc);
struct ieee80211_hdr *hdr;
- stats->length = (u16)GET_RX_STATUS_DESC_PKT_LEN(pdesc);
- stats->rx_drvinfo_size = (u8)GET_RX_STATUS_DESC_DRVINFO_SIZE(pdesc) * 8;
- stats->rx_bufshift = (u8)(GET_RX_STATUS_DESC_SHIFT(pdesc) & 0x03);
- stats->icv = (u16)GET_RX_STATUS_DESC_ICV(pdesc);
- stats->crc = (u16)GET_RX_STATUS_DESC_CRC32(pdesc);
+ stats->length = (u16)get_rx_status_desc_pkt_len(pdesc);
+ stats->rx_drvinfo_size = (u8)get_rx_status_desc_drvinfo_size(pdesc) * 8;
+ stats->rx_bufshift = (u8)(get_rx_status_desc_shift(pdesc) & 0x03);
+ stats->icv = (u16)get_rx_status_desc_icv(pdesc);
+ stats->crc = (u16)get_rx_status_desc_crc32(pdesc);
stats->hwerror = (u16)(stats->crc | stats->icv);
- stats->decrypted = !GET_RX_STATUS_DESC_SWDEC(pdesc);
-
- stats->rate = (u8)GET_RX_STATUS_DESC_RX_MCS(pdesc);
- stats->shortpreamble = (u16)GET_RX_STATUS_DESC_SPLCP(pdesc);
- stats->isampdu = (bool)(GET_RX_STATUS_DESC_PAGGR(pdesc) == 1);
- stats->isfirst_ampdu = (bool) ((GET_RX_STATUS_DESC_PAGGR(pdesc) == 1)
- && (GET_RX_STATUS_DESC_FAGGR(pdesc) == 1));
- stats->timestamp_low = GET_RX_STATUS_DESC_TSFL(pdesc);
- stats->rx_is40mhzpacket = (bool)GET_RX_STATUS_DESC_BW(pdesc);
- stats->is_ht = (bool)GET_RX_STATUS_DESC_RX_HT(pdesc);
+ stats->decrypted = !get_rx_status_desc_swdec(pdesc);
+
+ stats->rate = (u8)get_rx_status_desc_rx_mcs(pdesc);
+ stats->shortpreamble = (u16)get_rx_status_desc_splcp(pdesc);
+ stats->isampdu = (bool)(get_rx_status_desc_paggr(pdesc) == 1);
+ stats->isfirst_ampdu = (bool)((get_rx_status_desc_paggr(pdesc) == 1) &&
+ (get_rx_status_desc_faggr(pdesc) == 1));
+ stats->timestamp_low = get_rx_status_desc_tsfl(pdesc);
+ stats->rx_is40mhzpacket = (bool)get_rx_status_desc_bw(pdesc);
+ stats->is_ht = (bool)get_rx_status_desc_rx_ht(pdesc);
stats->is_cck = SE_RX_HAL_IS_CCK_RATE(pdesc);
if (stats->hwerror)
@@ -310,7 +310,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
}
void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_hdr *hdr, u8 *pdesc8,
u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
@@ -320,7 +320,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 *pdesc = pdesc_tx;
+ __le32 *pdesc = (__le32 *)pdesc8;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 reserved_macid = 0;
@@ -360,13 +360,13 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
if (rtlpriv->dm.useramask) {
/* set txdesc macId */
if (ptcb_desc->mac_id < 32) {
- SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ set_tx_desc_macid(pdesc, ptcb_desc->mac_id);
reserved_macid |= ptcb_desc->mac_id;
}
}
- SET_TX_DESC_RSVD_MACID(pdesc, reserved_macid);
+ set_tx_desc_rsvd_macid(pdesc, reserved_macid);
- SET_TX_DESC_TXHT(pdesc, ((ptcb_desc->hw_rate >=
+ set_tx_desc_txht(pdesc, ((ptcb_desc->hw_rate >=
DESC_RATEMCS0) ? 1 : 0));
if (rtlhal->version == VERSION_8192S_ACUT) {
@@ -378,31 +378,32 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
}
}
- SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+ set_tx_desc_tx_rate(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble)
- SET_TX_DESC_TX_SHORT(pdesc, 0);
+ set_tx_desc_tx_short(pdesc, 0);
/* Aggregation related */
if (info->flags & IEEE80211_TX_CTL_AMPDU)
- SET_TX_DESC_AGG_ENABLE(pdesc, 1);
+ set_tx_desc_agg_enable(pdesc, 1);
/* For AMPDU, we must insert SSN into TX_DESC */
- SET_TX_DESC_SEQ(pdesc, seq_number);
+ set_tx_desc_seq(pdesc, seq_number);
/* Protection mode related */
/* For 92S, if RTS/CTS are set, HW will execute RTS. */
/* We choose only one protection mode to execute */
- SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
- !ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_CTS_ENABLE(pdesc, ((ptcb_desc->cts_enable) ?
+ set_tx_desc_rts_enable(pdesc, ((ptcb_desc->rts_enable &&
+ !ptcb_desc->cts_enable) ?
+ 1 : 0));
+ set_tx_desc_cts_enable(pdesc, ((ptcb_desc->cts_enable) ?
1 : 0));
- SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
+ set_tx_desc_rts_stbc(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
- SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
- SET_TX_DESC_RTS_BANDWIDTH(pdesc, 0);
- SET_TX_DESC_RTS_SUB_CARRIER(pdesc, ptcb_desc->rts_sc);
- SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
+ set_tx_desc_rts_rate(pdesc, ptcb_desc->rts_rate);
+ set_tx_desc_rts_bandwidth(pdesc, 0);
+ set_tx_desc_rts_sub_carrier(pdesc, ptcb_desc->rts_sc);
+ set_tx_desc_rts_short(pdesc, ((ptcb_desc->rts_rate <=
DESC_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0)
: (ptcb_desc->rts_use_shortgi ? 1 : 0)));
@@ -411,27 +412,27 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
/* Set Bandwidth and sub-channel settings. */
if (bw_40) {
if (ptcb_desc->packet_bw) {
- SET_TX_DESC_TX_BANDWIDTH(pdesc, 1);
+ set_tx_desc_tx_bandwidth(pdesc, 1);
/* use duplicated mode */
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc, 0);
} else {
- SET_TX_DESC_TX_BANDWIDTH(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc,
+ set_tx_desc_tx_bandwidth(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc,
mac->cur_40_prime_sc);
}
} else {
- SET_TX_DESC_TX_BANDWIDTH(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ set_tx_desc_tx_bandwidth(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc, 0);
}
/* 3 Fill necessary field in First Descriptor */
/*DWORD 0*/
- SET_TX_DESC_LINIP(pdesc, 0);
- SET_TX_DESC_OFFSET(pdesc, 32);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
+ set_tx_desc_linip(pdesc, 0);
+ set_tx_desc_offset(pdesc, 32);
+ set_tx_desc_pkt_size(pdesc, (u16)skb->len);
/*DWORD 1*/
- SET_TX_DESC_RA_BRSR_ID(pdesc, ptcb_desc->ratr_index);
+ set_tx_desc_ra_brsr_id(pdesc, ptcb_desc->ratr_index);
/* Fill security related */
if (info->control.hw_key) {
@@ -441,62 +442,63 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ set_tx_desc_sec_type(pdesc, 0x1);
break;
case WLAN_CIPHER_SUITE_TKIP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x2);
+ set_tx_desc_sec_type(pdesc, 0x2);
break;
case WLAN_CIPHER_SUITE_CCMP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ set_tx_desc_sec_type(pdesc, 0x3);
break;
default:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ set_tx_desc_sec_type(pdesc, 0x0);
break;
}
}
/* Set Packet ID */
- SET_TX_DESC_PACKET_ID(pdesc, 0);
+ set_tx_desc_packet_id(pdesc, 0);
/* We will assign the management queue to BK. */
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
+ set_tx_desc_queue_sel(pdesc, fw_qsel);
/* Always enable the full rate fallback range */
- SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
+ set_tx_desc_data_rate_fb_limit(pdesc, 0x1F);
/* Fix: I don't know why the hw uses 6.5M to tx when this is set */
- SET_TX_DESC_USER_RATE(pdesc,
+ set_tx_desc_user_rate(pdesc,
ptcb_desc->use_driver_rate ? 1 : 0);
/* Set NON_QOS bit. */
if (!ieee80211_is_data_qos(fc))
- SET_TX_DESC_NON_QOS(pdesc, 1);
+ set_tx_desc_non_qos(pdesc, 1);
}
/* Fill fields that are required to be initialized
* in all of the descriptors */
/*DWORD 0 */
- SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
- SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
+ set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0));
+ set_tx_desc_last_seg(pdesc, (lastseg ? 1 : 0));
/* DWORD 7 */
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
+ set_tx_desc_tx_buffer_size(pdesc, (u16)skb->len);
/* DWORD 8 */
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
-void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
- bool firstseg, bool lastseg, struct sk_buff *skb)
+void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
+ bool firstseg, bool lastseg, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_tcb_desc *tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
+ __le32 *pdesc = (__le32 *)pdesc8;
dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
@@ -512,53 +514,55 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
/* This bit indicates this packet is used for FW download. */
if (tcb_desc->cmd_or_init == DESC_PACKET_TYPE_INIT) {
/* For firmware download we only need to set LINIP */
- SET_TX_DESC_LINIP(pdesc, tcb_desc->last_inipkt);
+ set_tx_desc_linip(pdesc, tcb_desc->last_inipkt);
/* 92SE must set this to 1, or firmware download hits a HW DMA error */
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
/* 92SE need not set the TX packet size during firmware download */
- SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
} else { /* H2C Command Desc format (Host TXCMD) */
/* 92SE must set this to 1, or firmware download hits a HW DMA error */
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, 0x20);
+ set_tx_desc_offset(pdesc, 0x20);
/* Buffer size + command header */
- SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
+ set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
/* Fixed queue of H2C command */
- SET_TX_DESC_QUEUE_SEL(pdesc, 0x13);
-
- SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq);
+ set_tx_desc_queue_sel(pdesc, 0x13);
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ le32p_replace_bits((__le32 *)skb->data, rtlhal->h2c_txcmd_seq,
+ GENMASK(30, 24));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
}
}
-void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
u8 desc_name, u8 *val)
{
+ __le32 *pdesc = (__le32 *)pdesc8;
+
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR:
- SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
+ set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
break;
default:
WARN_ONCE(true, "rtl8192se: ERR txdesc :%d not processed\n",
@@ -569,16 +573,16 @@ void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
switch (desc_name) {
case HW_DESC_RXOWN:
wmb();
- SET_RX_STATUS_DESC_OWN(pdesc, 1);
+ set_rx_status_desc_own(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR:
- SET_RX_STATUS__DESC_BUFF_ADDR(pdesc, *(u32 *) val);
+ set_rx_status__desc_buff_addr(pdesc, *(u32 *)val);
break;
case HW_DESC_RXPKT_LEN:
- SET_RX_STATUS_DESC_PKT_LEN(pdesc, *(u32 *) val);
+ set_rx_status_desc_pkt_len(pdesc, *(u32 *)val);
break;
case HW_DESC_RXERO:
- SET_RX_STATUS_DESC_EOR(pdesc, 1);
+ set_rx_status_desc_eor(pdesc, 1);
break;
default:
WARN_ONCE(true, "rtl8192se: ERR rxdesc :%d not processed\n",
@@ -589,17 +593,18 @@ void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
}
u64 rtl92se_get_desc(struct ieee80211_hw *hw,
- u8 *desc, bool istx, u8 desc_name)
+ u8 *desc8, bool istx, u8 desc_name)
{
u32 ret = 0;
+ __le32 *desc = (__le32 *)desc8;
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_TX_DESC_OWN(desc);
+ ret = get_tx_desc_own(desc);
break;
case HW_DESC_TXBUFF_ADDR:
- ret = GET_TX_DESC_TX_BUFFER_ADDRESS(desc);
+ ret = get_tx_desc_tx_buffer_address(desc);
break;
default:
WARN_ONCE(true, "rtl8192se: ERR txdesc :%d not processed\n",
@@ -609,13 +614,13 @@ u64 rtl92se_get_desc(struct ieee80211_hw *hw,
} else {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_STATUS_DESC_OWN(desc);
+ ret = get_rx_status_desc_own(desc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_STATUS_DESC_PKT_LEN(desc);
+ ret = get_rx_status_desc_pkt_len(desc);
break;
case HW_DESC_RXBUFF_ADDR:
- ret = GET_RX_STATUS_DESC_BUFF_ADDR(desc);
+ ret = get_rx_status_desc_buff_addr(desc);
break;
default:
WARN_ONCE(true, "rtl8192se: ERR rxdesc :%d not processed\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
index 54a3aec1dfa7..1385e5a2e0b0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
@@ -485,15 +485,12 @@ bool rtl8723e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
int i;
- bool rtstatus = true;
u32 *radioa_array_table;
u16 radioa_arraylen;
radioa_arraylen = RTL8723ERADIOA_1TARRAYLENGTH;
radioa_array_table = RTL8723E_RADIOA_1TARRAY;
- rtstatus = true;
-
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
@@ -1341,9 +1338,9 @@ void rtl8723e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
long result[4][8];
u8 i, final_candidate;
- bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
- reg_ecc, reg_tmp = 0;
+ bool b_patha_ok;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc,
+ reg_tmp = 0;
bool is12simular, is13simular, is23simular;
u32 iqk_bb_reg[10] = {
ROFDM0_XARXIQIMBALANCE,
@@ -1372,7 +1369,6 @@ void rtl8723e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
}
final_candidate = 0xff;
b_patha_ok = false;
- b_pathb_ok = false;
is12simular = false;
is23simular = false;
is13simular = false;
@@ -1412,23 +1408,16 @@ void rtl8723e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
- reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
- reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
b_patha_ok = true;
- b_pathb_ok = true;
} else {
rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index aa8a0950fcea..d48364460922 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -2251,8 +2251,8 @@ void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
long result[4][8];
u8 i, final_candidate, idx;
bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4;
- long reg_ecc, reg_tmp = 0;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_ec4;
+ long reg_tmp = 0;
bool is12simular, is13simular, is23simular;
u32 iqk_bb_reg[9] = {
ROFDM0_XARXIQIMBALANCE,
@@ -2334,11 +2334,9 @@ void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
reg_e94 = result[final_candidate][0];
@@ -2346,13 +2344,11 @@ void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e9c = result[final_candidate][1];
rtlphy->reg_e9c = reg_e9c;
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
reg_eb4 = result[final_candidate][4];
rtlphy->reg_eb4 = reg_eb4;
reg_ebc = result[final_candidate][5];
rtlphy->reg_ebc = reg_ebc;
reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
b_patha_ok = true;
b_pathb_ok = true;
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
index 18ce2856a91b..37036e653e56 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
@@ -223,7 +223,6 @@ bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw,
struct rtl8192_tx_ring *ring;
struct rtl_tx_desc *pdesc;
struct sk_buff *pskb = NULL;
- u8 own;
unsigned long flags;
ring = &rtlpci->tx_ring[BEACON_QUEUE];
@@ -233,9 +232,6 @@ bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw,
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
pdesc = &ring->desc[0];
- own = (u8)rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc, true,
- HW_DESC_OWN);
-
rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
__skb_queue_tail(&ring->queue, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 979e434a4e73..c1a04efa69ea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -2076,7 +2076,6 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
- bool rtstatus = true;
u32 *radioa_array_table_a, *radioa_array_table_b;
u16 radioa_arraylen_a, radioa_arraylen_b;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -2088,7 +2087,6 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen_a);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
- rtstatus = true;
switch (rfpath) {
case RF90_PATH_A:
return __rtl8821ae_phy_config_with_headerfile(hw,
@@ -2111,7 +2109,6 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
- bool rtstatus = true;
u32 *radioa_array_table;
u16 radioa_arraylen;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -2121,7 +2118,6 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
- rtstatus = true;
switch (rfpath) {
case RF90_PATH_A:
return __rtl8821ae_phy_config_with_headerfile(hw,
@@ -2351,7 +2347,7 @@ static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
short band_temp = -1, regulation = -1, bandwidth_temp = -1,
rate_section = -1, channel_temp = -1;
- u16 bd, regu, bdwidth, sec, chnl;
+ u16 regu, bdwidth, sec, chnl;
s8 power_limit = MAX_POWER_INDEX;
if (rtlefuse->eeprom_regulatory == 2)
@@ -2472,7 +2468,6 @@ static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
return MAX_POWER_INDEX;
}
- bd = band_temp;
regu = regulation;
bdwidth = bandwidth_temp;
sec = rate_section;
@@ -3553,8 +3548,6 @@ void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw)
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
if (36 <= channel && channel <= 64)
data = 0x114E9;
- else if (100 <= channel && channel <= 140)
- data = 0x110E9;
else
data = 0x110E9;
rtl8821ae_phy_set_rf_reg(hw, path, RF_APK,
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 4b59f3b46b28..348b0072cdd6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1021,8 +1021,10 @@ int rtl_usb_probe(struct usb_interface *intf,
rtlpriv->hw = hw;
rtlpriv->usb_data = kcalloc(RTL_USB_MAX_RX_COUNT, sizeof(u32),
GFP_KERNEL);
- if (!rtlpriv->usb_data)
+ if (!rtlpriv->usb_data) {
+ ieee80211_free_hw(hw);
return -ENOMEM;
+ }
/* this spin lock must be initialized early */
spin_lock_init(&rtlpriv->locks.usb_lock);
@@ -1083,6 +1085,7 @@ error_out2:
_rtl_usb_io_handler_release(hw);
usb_put_dev(udev);
complete(&rtlpriv->firmware_loading_complete);
+ kfree(rtlpriv->usb_data);
return -ENODEV;
}
EXPORT_SYMBOL(rtl_usb_probe);
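
/* Aside on the two fixes above, with illustrative names: both restore
 * the usual "unwind in reverse order of acquisition" shape for probe
 * error paths -- each failure point frees exactly what the earlier
 * steps allocated, nothing more.  A minimal sketch of that pattern:
 */
#include <linux/slab.h>

static int probe_unwind_sketch(void)
{
	void *hw_priv, *usb_data;
	int ret;

	hw_priv = kzalloc(64, GFP_KERNEL);
	if (!hw_priv)
		return -ENOMEM;		/* nothing to unwind yet */

	usb_data = kzalloc(64, GFP_KERNEL);
	if (!usb_data) {
		ret = -ENOMEM;
		goto err_free_hw;	/* only hw_priv exists so far */
	}

	return 0;

err_free_hw:
	kfree(hw_priv);
	return ret;
}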
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index 77edee2df8b8..15e12155a04c 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -14,6 +14,7 @@ rtw88-y += main.o \
fw.o \
ps.o \
sec.o \
+ bf.o \
regd.o
rtw88-$(CONFIG_RTW88_8822BE) += rtw8822b.o rtw8822b_table.o
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
new file mode 100644
index 000000000000..fda771d23f71
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation.
+ */
+
+#include "main.h"
+#include "reg.h"
+#include "bf.h"
+#include "debug.h"
+
+void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_bfee *bfee = &rtwvif->bfee;
+ struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
+
+ if (bfee->role == RTW_BFEE_NONE)
+ return;
+
+ if (bfee->role == RTW_BFEE_MU)
+ bfinfo->bfer_mu_cnt--;
+ else if (bfee->role == RTW_BFEE_SU)
+ bfinfo->bfer_su_cnt--;
+
+ chip->ops->config_bfee(rtwdev, rtwvif, bfee, false);
+
+ bfee->role = RTW_BFEE_NONE;
+}
+
+void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_bfee *bfee = &rtwvif->bfee;
+ struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct ieee80211_sta *sta;
+ struct ieee80211_sta_vht_cap *vht_cap;
+ struct ieee80211_sta_vht_cap *ic_vht_cap;
+ const u8 *bssid = bss_conf->bssid;
+ u32 sound_dim;
+ u8 bfee_role = RTW_BFEE_NONE;
+ u8 i;
+
+ if (!(chip->band & RTW_BAND_5G))
+ return;
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta(vif, bssid);
+ if (!sta) {
+ rtw_warn(rtwdev, "failed to find station entry for bss %pM\n",
+ bssid);
+ goto out_unlock;
+ }
+
+ ic_vht_cap = &hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap;
+ vht_cap = &sta->vht_cap;
+
+ if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+ if (bfinfo->bfer_mu_cnt >= chip->bfer_mu_max_num) {
+ rtw_dbg(rtwdev, RTW_DBG_BF, "mu bfer number over limit\n");
+ goto out_unlock;
+ }
+
+ ether_addr_copy(bfee->mac_addr, bssid);
+ bfee_role = RTW_BFEE_MU;
+ bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7);
+ bfee->aid = bss_conf->aid;
+ bfinfo->bfer_mu_cnt++;
+
+ chip->ops->config_bfee(rtwdev, rtwvif, bfee, true);
+ } else if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+ if (bfinfo->bfer_su_cnt >= chip->bfer_su_max_num) {
+ rtw_dbg(rtwdev, RTW_DBG_BF, "su bfer number over limit\n");
+ goto out_unlock;
+ }
+
+ sound_dim = vht_cap->cap &
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+
+ ether_addr_copy(bfee->mac_addr, bssid);
+ bfee_role = RTW_BFEE_SU;
+ bfee->sound_dim = (u8)sound_dim;
+ bfee->g_id = 0;
+ bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7);
+ bfinfo->bfer_su_cnt++;
+ for (i = 0; i < chip->bfer_su_max_num; i++) {
+ if (!test_bit(i, bfinfo->bfer_su_reg_maping)) {
+ set_bit(i, bfinfo->bfer_su_reg_maping);
+ bfee->su_reg_index = i;
+ break;
+ }
+ }
+
+ chip->ops->config_bfee(rtwdev, rtwvif, bfee, true);
+ }
+
+out_unlock:
+ bfee->role = bfee_role;
+ rcu_read_unlock();
+}
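
/* Aside, not part of the patch: the p_aid assembled above is a 9-bit
 * value taken from the top of the BSSID -- bssid[5] supplies bits 8..1
 * and the high bit of bssid[4] supplies bit 0.  Worked example with
 * made-up bytes:
 */
static u16 partial_aid_sketch(const u8 *bssid)
{
	/* bssid[4] = 0x80, bssid[5] = 0x12:
	 * (0x12 << 1) | (0x80 >> 7) = 0x24 | 0x1 = 0x25
	 */
	return (bssid[5] << 1) | (bssid[4] >> 7);
}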
+
+void rtw_bf_init_bfer_entry_mu(struct rtw_dev *rtwdev,
+ struct mu_bfer_init_para *param)
+{
+ u16 mu_bf_ctl = 0;
+ u8 *addr = param->bfer_address;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ rtw_write8(rtwdev, REG_ASSOCIATED_BFMER0_INFO + i, addr[i]);
+ rtw_write16(rtwdev, REG_ASSOCIATED_BFMER0_INFO + 6, param->paid);
+ rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20, param->csi_para);
+
+ mu_bf_ctl = rtw_read16(rtwdev, REG_WMAC_MU_BF_CTL) & 0xC000;
+ mu_bf_ctl |= param->my_aid | (param->csi_length_sel << 12);
+ rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, mu_bf_ctl);
+}
+
+void rtw_bf_cfg_sounding(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ enum rtw_trx_desc_rate rate)
+{
+ u32 psf_ctl = 0;
+ u8 csi_rsc = 0x1;
+
+ psf_ctl = rtw_read32(rtwdev, REG_BBPSF_CTRL) |
+ BIT_WMAC_USE_NDPARATE |
+ (csi_rsc << 13);
+
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_SOUNDING);
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL + 3, 0x26);
+ rtw_write8_clr(rtwdev, REG_RXFLTMAP1, BIT_RXFLTMAP1_BF_REPORT_POLL);
+ rtw_write8_clr(rtwdev, REG_RXFLTMAP4, BIT_RXFLTMAP4_BF_REPORT_POLL);
+
+ if (vif->net_type == RTW_NET_AP_MODE)
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, psf_ctl | BIT(12));
+ else
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, psf_ctl & ~BIT(12));
+}
+
+void rtw_bf_cfg_mu_bfee(struct rtw_dev *rtwdev, struct cfg_mumimo_para *param)
+{
+ u8 mu_tbl_sel;
+ u8 mu_valid;
+
+ mu_valid = rtw_read8(rtwdev, REG_MU_TX_CTL) &
+ ~BIT_MASK_R_MU_TABLE_VALID;
+
+ rtw_write8(rtwdev, REG_MU_TX_CTL,
+ (mu_valid | BIT(0) | BIT(1)) & ~(BIT(7)));
+
+ mu_tbl_sel = rtw_read8(rtwdev, REG_MU_TX_CTL + 1) & 0xF8;
+
+ rtw_write8(rtwdev, REG_MU_TX_CTL + 1, mu_tbl_sel);
+ rtw_write32(rtwdev, REG_MU_STA_GID_VLD, param->given_gid_tab[0]);
+ rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO, param->given_user_pos[0]);
+ rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO + 4,
+ param->given_user_pos[1]);
+
+ rtw_write8(rtwdev, REG_MU_TX_CTL + 1, mu_tbl_sel | 1);
+ rtw_write32(rtwdev, REG_MU_STA_GID_VLD, param->given_gid_tab[1]);
+ rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO, param->given_user_pos[2]);
+ rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO + 4,
+ param->given_user_pos[3]);
+}
+
+void rtw_bf_del_bfer_entry_mu(struct rtw_dev *rtwdev)
+{
+ rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO, 0);
+ rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO + 4, 0);
+ rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, 0);
+ rtw_write8(rtwdev, REG_MU_TX_CTL, 0);
+}
+
+void rtw_bf_del_sounding(struct rtw_dev *rtwdev)
+{
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, 0);
+}
+
+void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee)
+{
+ u8 nc_index = 1;
+ u8 nr_index = bfee->sound_dim;
+ u8 grouping = 0, codebookinfo = 1, coefficientsize = 3;
+ u32 addr_bfer_info, addr_csi_rpt, csi_param;
+ u8 i;
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "config as an su bfee\n");
+
+ switch (bfee->su_reg_index) {
+ case 1:
+ addr_bfer_info = REG_ASSOCIATED_BFMER1_INFO;
+ addr_csi_rpt = REG_TX_CSI_RPT_PARAM_BW20 + 2;
+ break;
+ case 0:
+ default:
+ addr_bfer_info = REG_ASSOCIATED_BFMER0_INFO;
+ addr_csi_rpt = REG_TX_CSI_RPT_PARAM_BW20;
+ break;
+ }
+
+ /* Sounding protocol control */
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_SOUNDING);
+
+ /* MAC address/Partial AID of Beamformer */
+ for (i = 0; i < ETH_ALEN; i++)
+ rtw_write8(rtwdev, addr_bfer_info + i, bfee->mac_addr[i]);
+
+ csi_param = (u16)((coefficientsize << 10) |
+ (codebookinfo << 8) |
+ (grouping << 6) |
+ (nr_index << 3) |
+ nc_index);
+ rtw_write16(rtwdev, addr_csi_rpt, csi_param);
+
+ /* ndp rx standby timer */
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL + 3, RTW_NDP_RX_STANDBY_TIME);
+}
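
/* Aside, not part of the patch: reading the shifts in csi_param above
 * as a field layout (the labels are my reading, not a datasheet's):
 *   [2:0] Nc  [5:3] Nr  [7:6] grouping  [9:8] codebook  [11:10] coeff size
 * The equivalent FIELD_PREP form, as a sketch:
 */
#define CSI_SKETCH_NC		GENMASK(2, 0)
#define CSI_SKETCH_NR		GENMASK(5, 3)
#define CSI_SKETCH_GROUPING	GENMASK(7, 6)
#define CSI_SKETCH_CODEBOOK	GENMASK(9, 8)
#define CSI_SKETCH_COEFF	GENMASK(11, 10)

static u16 csi_param_sketch(u8 nc, u8 nr, u8 grouping, u8 codebook, u8 coeff)
{
	return FIELD_PREP(CSI_SKETCH_COEFF, coeff) |
	       FIELD_PREP(CSI_SKETCH_CODEBOOK, codebook) |
	       FIELD_PREP(CSI_SKETCH_GROUPING, grouping) |
	       FIELD_PREP(CSI_SKETCH_NR, nr) |
	       FIELD_PREP(CSI_SKETCH_NC, nc);
}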
+
+/* nc index: 1 = 2T2R, 0 = 1T1R
+ * nr index: 1 = use Nsts, 0 = use reg setting
+ * codebookinfo: 1 = 802.11ac, 3 = 802.11n
+ */
+void rtw_bf_enable_bfee_mu(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee)
+{
+ struct rtw_bf_info *bf_info = &rtwdev->bf_info;
+ struct mu_bfer_init_para param;
+ u8 nc_index = 1, nr_index = 1;
+ u8 grouping = 0, codebookinfo = 1, coefficientsize = 0;
+ u32 csi_param;
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "config as an mu bfee\n");
+
+ csi_param = (u16)((coefficientsize << 10) |
+ (codebookinfo << 8) |
+ (grouping << 6) |
+ (nr_index << 3) |
+ nc_index);
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "nc=%d nr=%d group=%d codebookinfo=%d coefficientsize=%d\n",
+ nc_index, nr_index, grouping, codebookinfo,
+ coefficientsize);
+
+ param.paid = bfee->p_aid;
+ param.csi_para = csi_param;
+ param.my_aid = bfee->aid & 0xfff;
+ param.csi_length_sel = HAL_CSI_SEG_4K;
+ ether_addr_copy(param.bfer_address, bfee->mac_addr);
+
+ rtw_bf_init_bfer_entry_mu(rtwdev, &param);
+
+ bf_info->cur_csi_rpt_rate = DESC_RATE6M;
+ rtw_bf_cfg_sounding(rtwdev, vif, DESC_RATE6M);
+
+ /* accept action_no_ack */
+ rtw_write16_set(rtwdev, REG_RXFLTMAP0, BIT_RXFLTMAP0_ACTIONNOACK);
+
+ /* accept NDPA and BF report poll */
+ rtw_write16_set(rtwdev, REG_RXFLTMAP1, BIT_RXFLTMAP1_BF);
+}
+
+void rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev,
+ struct rtw_bfee *bfee)
+{
+ struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "remove as a su bfee\n");
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_REMOVE);
+
+ switch (bfee->su_reg_index) {
+ case 0:
+ rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO, 0);
+ rtw_write16(rtwdev, REG_ASSOCIATED_BFMER0_INFO + 4, 0);
+ rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20, 0);
+ break;
+ case 1:
+ rtw_write32(rtwdev, REG_ASSOCIATED_BFMER1_INFO, 0);
+ rtw_write16(rtwdev, REG_ASSOCIATED_BFMER1_INFO + 4, 0);
+ rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20 + 2, 0);
+ break;
+ }
+
+ clear_bit(bfee->su_reg_index, bfinfo->bfer_su_reg_maping);
+ bfee->su_reg_index = 0xFF;
+}
+
+void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev,
+ struct rtw_bfee *bfee)
+{
+ struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
+
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_REMOVE);
+
+ rtw_bf_del_bfer_entry_mu(rtwdev);
+
+ if (bfinfo->bfer_su_cnt == 0 && bfinfo->bfer_mu_cnt == 0)
+ rtw_bf_del_sounding(rtwdev);
+}
+
+void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf)
+{
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_bfee *bfee = &rtwvif->bfee;
+ struct cfg_mumimo_para param;
+
+ if (bfee->role != RTW_BFEE_MU) {
+ rtw_dbg(rtwdev, RTW_DBG_BF, "this vif is not mu bfee\n");
+ return;
+ }
+
+ param.grouping_bitmap = 0;
+ param.mu_tx_en = 0;
+ memset(param.sounding_sts, 0, 6);
+ memcpy(param.given_gid_tab, conf->mu_group.membership, 8);
+ memcpy(param.given_user_pos, conf->mu_group.position, 16);
+ rtw_dbg(rtwdev, RTW_DBG_BF, "STA0: gid_valid=0x%x, user_position_l=0x%x, user_position_h=0x%x\n",
+ param.given_gid_tab[0], param.given_user_pos[0],
+ param.given_user_pos[1]);
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "STA1: gid_valid=0x%x, user_position_l=0x%x, user_position_h=0x%x\n",
+ param.given_gid_tab[1], param.given_user_pos[2],
+ param.given_user_pos[3]);
+
+ rtw_bf_cfg_mu_bfee(rtwdev, &param);
+}
+
+void rtw_bf_phy_init(struct rtw_dev *rtwdev)
+{
+ u8 tmp8;
+ u32 tmp32;
+ u8 retry_limit = 0xA;
+ u8 ndpa_rate = 0x10;
+ u8 ack_policy = 3;
+
+ tmp32 = rtw_read32(rtwdev, REG_MU_TX_CTL);
+ /* Enable P1 aggr new packet according to P0 transfer time */
+ tmp32 |= BIT_MU_P1_WAIT_STATE_EN;
+ /* MU Retry Limit */
+ tmp32 &= ~BIT_MASK_R_MU_RL;
+ tmp32 |= (retry_limit << BIT_SHIFT_R_MU_RL) & BIT_MASK_R_MU_RL;
+ /* Disable Tx MU-MIMO until sounding done */
+ tmp32 &= ~BIT_EN_MU_MIMO;
+ /* Clear validity of MU STAs */
+ tmp32 &= ~BIT_MASK_R_MU_TABLE_VALID;
+ rtw_write32(rtwdev, REG_MU_TX_CTL, tmp32);
+
+ /* MU-MIMO Option as default value */
+ tmp8 = ack_policy << BIT_SHIFT_WMAC_TXMU_ACKPOLICY;
+ tmp8 |= BIT_WMAC_TXMU_ACKPOLICY_EN;
+ rtw_write8(rtwdev, REG_WMAC_MU_BF_OPTION, tmp8);
+
+ /* MU-MIMO Control as default value */
+ rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, 0);
+ /* Set MU NDPA rate & BW source */
+ rtw_write32_set(rtwdev, REG_TXBF_CTRL, BIT_USE_NDPA_PARAMETER);
+ /* Set NDPA Rate */
+ rtw_write8(rtwdev, REG_NDPA_OPT_CTRL, ndpa_rate);
+
+ rtw_write32_mask(rtwdev, REG_BBPSF_CTRL, BIT_MASK_CSI_RATE,
+ DESC_RATE6M);
+}
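
/* Aside: rtw_write32_mask() used above is a read-modify-write of only
 * the masked field; a sketch of its semantics (not the driver's actual
 * implementation):
 */
static void write32_mask_sketch(struct rtw_dev *rtwdev, u32 addr,
				u32 mask, u32 val)
{
	u32 shift = __ffs(mask);	/* position of the mask's lowest set bit */
	u32 orig = rtw_read32(rtwdev, addr);

	rtw_write32(rtwdev, addr, (orig & ~mask) | ((val << shift) & mask));
}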
+
+void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
+ u8 fixrate_en, u8 *new_rate)
+{
+ u32 csi_cfg;
+ u16 cur_rrsr;
+
+ csi_cfg = rtw_read32(rtwdev, REG_BBPSF_CTRL) & ~BIT_MASK_CSI_RATE;
+ cur_rrsr = rtw_read16(rtwdev, REG_RRSR);
+
+ if (rssi >= 40) {
+ if (cur_rate != DESC_RATE54M) {
+ cur_rrsr |= BIT(DESC_RATE54M);
+ csi_cfg |= (DESC_RATE54M & BIT_MASK_CSI_RATE_VAL) <<
+ BIT_SHIFT_CSI_RATE;
+ rtw_write16(rtwdev, REG_RRSR, cur_rrsr);
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, csi_cfg);
+ }
+ *new_rate = DESC_RATE54M;
+ } else {
+ if (cur_rate != DESC_RATE24M) {
+ cur_rrsr &= ~BIT(DESC_RATE54M);
+			csi_cfg |= (DESC_RATE24M & BIT_MASK_CSI_RATE_VAL) <<
+ BIT_SHIFT_CSI_RATE;
+ rtw_write16(rtwdev, REG_RRSR, cur_rrsr);
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, csi_cfg);
+ }
+ *new_rate = DESC_RATE24M;
+ }
+}
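
/* Aside, with a hypothetical caller, to show the in/out contract of
 * rtw_bf_cfg_csi_rate(): the current rate goes in, and *new_rate
 * carries the (possibly unchanged) choice back so the caller can cache
 * it for the next evaluation:
 */
static void csi_rate_watchdog_sketch(struct rtw_dev *rtwdev, u8 min_rssi)
{
	struct rtw_bf_info *bf_info = &rtwdev->bf_info;
	u8 new_rate = bf_info->cur_csi_rpt_rate;

	rtw_bf_cfg_csi_rate(rtwdev, min_rssi, bf_info->cur_csi_rpt_rate,
			    0, &new_rate);
	bf_info->cur_csi_rpt_rate = new_rate;
}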
diff --git a/drivers/net/wireless/realtek/rtw88/bf.h b/drivers/net/wireless/realtek/rtw88/bf.h
new file mode 100644
index 000000000000..96a8216dd11f
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/bf.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation.
+ */
+
+#ifndef __RTW_BF_H_
+#define __RTW_BF_H_
+
+#define REG_TXBF_CTRL 0x042C
+#define REG_RRSR 0x0440
+#define REG_NDPA_OPT_CTRL 0x045F
+
+#define REG_ASSOCIATED_BFMER0_INFO 0x06E4
+#define REG_ASSOCIATED_BFMER1_INFO 0x06EC
+#define REG_TX_CSI_RPT_PARAM_BW20 0x06F4
+#define REG_SND_PTCL_CTRL 0x0718
+#define REG_MU_TX_CTL 0x14C0
+#define REG_MU_STA_GID_VLD 0x14C4
+#define REG_MU_STA_USER_POS_INFO 0x14C8
+#define REG_CSI_RRSR 0x1678
+#define REG_WMAC_MU_BF_OPTION 0x167C
+#define REG_WMAC_MU_BF_CTL 0x1680
+
+#define BIT_WMAC_USE_NDPARATE BIT(30)
+#define BIT_WMAC_TXMU_ACKPOLICY_EN BIT(6)
+#define BIT_USE_NDPA_PARAMETER BIT(30)
+#define BIT_MU_P1_WAIT_STATE_EN BIT(16)
+#define BIT_EN_MU_MIMO BIT(7)
+
+#define R_MU_RL 0xf
+#define BIT_SHIFT_R_MU_RL 12
+#define BIT_SHIFT_WMAC_TXMU_ACKPOLICY 4
+#define BIT_SHIFT_CSI_RATE 24
+
+#define BIT_MASK_R_MU_RL (R_MU_RL << BIT_SHIFT_R_MU_RL)
+#define BIT_MASK_R_MU_TABLE_VALID 0x3f
+#define BIT_MASK_CSI_RATE_VAL 0x3F
+#define BIT_MASK_CSI_RATE (BIT_MASK_CSI_RATE_VAL << BIT_SHIFT_CSI_RATE)
+
+#define BIT_RXFLTMAP0_ACTIONNOACK BIT(14)
+#define BIT_RXFLTMAP1_BF (BIT(4) | BIT(5))
+#define BIT_RXFLTMAP1_BF_REPORT_POLL BIT(4)
+#define BIT_RXFLTMAP4_BF_REPORT_POLL BIT(4)
+
+#define RTW_NDP_RX_STANDBY_TIME 0x70
+#define RTW_SND_CTRL_REMOVE 0xD8
+#define RTW_SND_CTRL_SOUNDING 0xDB
+
+enum csi_seg_len {
+ HAL_CSI_SEG_4K = 0,
+ HAL_CSI_SEG_8K = 1,
+ HAL_CSI_SEG_11K = 2,
+};
+
+struct cfg_mumimo_para {
+ u8 sounding_sts[6];
+ u16 grouping_bitmap;
+ u8 mu_tx_en;
+ u32 given_gid_tab[2];
+ u32 given_user_pos[4];
+};
+
+struct mu_bfer_init_para {
+ u16 paid;
+ u16 csi_para;
+ u16 my_aid;
+ enum csi_seg_len csi_length_sel;
+ u8 bfer_address[ETH_ALEN];
+};
+
+void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf);
+void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf);
+void rtw_bf_init_bfer_entry_mu(struct rtw_dev *rtwdev,
+ struct mu_bfer_init_para *param);
+void rtw_bf_cfg_sounding(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ enum rtw_trx_desc_rate rate);
+void rtw_bf_cfg_mu_bfee(struct rtw_dev *rtwdev, struct cfg_mumimo_para *param);
+void rtw_bf_del_bfer_entry_mu(struct rtw_dev *rtwdev);
+void rtw_bf_del_sounding(struct rtw_dev *rtwdev);
+void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee);
+void rtw_bf_enable_bfee_mu(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee);
+void rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev, struct rtw_bfee *bfee);
+void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev, struct rtw_bfee *bfee);
+void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf);
+void rtw_bf_phy_init(struct rtw_dev *rtwdev);
+void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
+ u8 fixrate_en, u8 *new_rate);
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 793b40bdbf7c..4dfb2ec395ee 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -383,9 +383,9 @@ static void rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason)
u8 rssi_step;
u8 rssi;
- scan = rtw_flag_check(rtwdev, RTW_FLAG_SCANNING);
+ scan = test_bit(RTW_FLAG_SCANNING, rtwdev->flags);
coex_stat->wl_connected = !!rtwdev->sta_cnt;
- coex_stat->wl_gl_busy = rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+ coex_stat->wl_gl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
if (stats->tx_throughput > stats->rx_throughput)
coex_stat->wl_tput_dir = COEX_WL_TPUT_TX;
@@ -810,8 +810,6 @@ static void rtw_coex_ignore_wlan_act(struct rtw_dev *rtwdev, bool enable)
static void rtw_coex_power_save_state(struct rtw_dev *rtwdev, u8 ps_type,
u8 lps_val, u8 rpwm_val)
{
- struct rtw_lps_conf *lps_conf = &rtwdev->lps_conf;
- struct rtw_vif *rtwvif;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
u8 lps_mode = 0x0;
@@ -823,18 +821,14 @@ static void rtw_coex_power_save_state(struct rtw_dev *rtwdev, u8 ps_type,
/* recover to original 32k low power setting */
coex_stat->wl_force_lps_ctrl = false;
- rtwvif = lps_conf->rtwvif;
- if (rtwvif && rtw_in_lps(rtwdev))
- rtw_leave_lps(rtwdev, rtwvif);
+ rtw_leave_lps(rtwdev);
break;
case COEX_PS_LPS_OFF:
coex_stat->wl_force_lps_ctrl = true;
if (lps_mode)
rtw_fw_coex_tdma_type(rtwdev, 0x8, 0, 0, 0, 0);
- rtwvif = lps_conf->rtwvif;
- if (rtwvif && rtw_in_lps(rtwdev))
- rtw_leave_lps(rtwdev, rtwvif);
+ rtw_leave_lps(rtwdev);
break;
default:
break;
@@ -1308,6 +1302,7 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
struct rtw_chip_info *chip = rtwdev->chip;
bool wl_hi_pri = false;
u8 table_case, tdma_case;
+ u32 slot_type = 0;
if (coex_stat->wl_linkscan_proc || coex_stat->wl_hi_pri_task1 ||
coex_stat->wl_hi_pri_task2)
@@ -1318,14 +1313,16 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
if (wl_hi_pri) {
table_case = 15;
if (coex_stat->bt_a2dp_exist &&
- !coex_stat->bt_pan_exist)
+ !coex_stat->bt_pan_exist) {
+ slot_type = TDMA_4SLOT;
tdma_case = 11;
- else if (coex_stat->wl_hi_pri_task1)
+ } else if (coex_stat->wl_hi_pri_task1) {
tdma_case = 6;
- else if (!coex_stat->bt_page)
+ } else if (!coex_stat->bt_page) {
tdma_case = 8;
- else
+ } else {
tdma_case = 9;
+ }
} else if (coex_stat->wl_connected) {
table_case = 10;
tdma_case = 10;
@@ -1361,7 +1358,7 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
rtw_coex_table(rtwdev, table_case);
- rtw_coex_tdma(rtwdev, false, tdma_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
}
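
/* Aside, not part of the patch: rtw_coex_tdma() takes the case index in
 * the low byte, and TDMA_4SLOT is a flag OR'd above it (assumed to be
 * BIT(8) per coex.h), so "tdma_case | slot_type" keeps the same table
 * entry but requests the 4-slot variant.  Decoding sketch:
 */
static void coex_tdma_decode_sketch(u32 tcase)
{
	u8 idx = tcase & 0xff;			/* TDMA case table index */
	bool four_slot = !!(tcase & BIT(8));	/* assumed TDMA_4SLOT flag */

	(void)idx;
	(void)four_slot;
}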
static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
@@ -1475,13 +1472,13 @@ static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev)
if (efuse->share_ant) {
/* Shared-Ant */
+ slot_type = TDMA_4SLOT;
+
if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0)
table_case = 10;
else
table_case = 9;
- slot_type = TDMA_4SLOT;
-
if (coex_stat->wl_gl_busy)
tdma_case = 13;
else
@@ -1585,13 +1582,14 @@ static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev)
if (efuse->share_ant) {
/* Shared-Ant */
+ slot_type = TDMA_4SLOT;
+
if (coex_stat->bt_ble_exist)
table_case = 26;
else
table_case = 9;
if (coex_stat->wl_gl_busy) {
- slot_type = TDMA_4SLOT;
tdma_case = 13;
} else {
tdma_case = 14;
@@ -1794,10 +1792,12 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
+ u32 slot_type = 0;
if (efuse->share_ant) {
/* Shared-Ant */
if (coex_stat->bt_a2dp_exist) {
+ slot_type = TDMA_4SLOT;
table_case = 9;
tdma_case = 11;
} else {
@@ -1818,7 +1818,7 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
rtw_coex_table(rtwdev, table_case);
- rtw_coex_tdma(rtwdev, false, tdma_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
}
static void rtw_coex_action_wl_not_connected(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 6ad985e98e42..5a181e01ebef 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -498,12 +498,32 @@ static void rtw_print_vht_rate_txt(struct seq_file *m, u8 rate)
seq_printf(m, " VHT%uSMCS%u", n_ss, mcs_n);
}
+static void rtw_print_rate(struct seq_file *m, u8 rate)
+{
+ switch (rate) {
+ case DESC_RATE1M...DESC_RATE11M:
+ rtw_print_cck_rate_txt(m, rate);
+ break;
+ case DESC_RATE6M...DESC_RATE54M:
+ rtw_print_ofdm_rate_txt(m, rate);
+ break;
+ case DESC_RATEMCS0...DESC_RATEMCS15:
+ rtw_print_ht_rate_txt(m, rate);
+ break;
+ case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT2SS_MCS9:
+ rtw_print_vht_rate_txt(m, rate);
+ break;
+ default:
+ seq_printf(m, " Unknown rate=0x%x\n", rate);
+ break;
+ }
+}
+
static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_hal *hal = &rtwdev->hal;
- void (*print_rate)(struct seq_file *, u8) = NULL;
u8 path, rate;
struct rtw_power_params pwr_param = {0};
u8 bw = hal->current_band_width;
@@ -528,30 +548,11 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
rate < DESC_RATEVHT1SS_MCS0)
continue;
- switch (rate) {
- case DESC_RATE1M...DESC_RATE11M:
- print_rate = rtw_print_cck_rate_txt;
- break;
- case DESC_RATE6M...DESC_RATE54M:
- print_rate = rtw_print_ofdm_rate_txt;
- break;
- case DESC_RATEMCS0...DESC_RATEMCS15:
- print_rate = rtw_print_ht_rate_txt;
- break;
- case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT2SS_MCS9:
- print_rate = rtw_print_vht_rate_txt;
- break;
- default:
- print_rate = NULL;
- break;
- }
-
rtw_get_tx_power_params(rtwdev, path, rate, bw,
ch, regd, &pwr_param);
seq_printf(m, "%4c ", path + 'A');
- if (print_rate)
- print_rate(m, rate);
+ rtw_print_rate(m, rate);
seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d)\n",
hal->tx_pwr_tbl[path][rate],
hal->tx_pwr_tbl[path][rate],
@@ -567,6 +568,132 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
return 0;
}
+static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_traffic_stats *stats = &rtwdev->stats;
+ struct rtw_pkt_count *last_cnt = &dm_info->last_pkt_count;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct ewma_evm *ewma_evm = dm_info->ewma_evm;
+ struct ewma_snr *ewma_snr = dm_info->ewma_snr;
+ u8 ss, rate_id;
+
+ seq_puts(m, "==========[Common Info]========\n");
+ seq_printf(m, "Is link = %c\n", rtw_is_assoc(rtwdev) ? 'Y' : 'N');
+ seq_printf(m, "Current CH(fc) = %u\n", rtwdev->hal.current_channel);
+ seq_printf(m, "Current BW = %u\n", rtwdev->hal.current_band_width);
+ seq_printf(m, "Current IGI = 0x%x\n", dm_info->igi_history[0]);
+ seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n\n",
+ stats->tx_throughput, stats->rx_throughput);
+
+ seq_puts(m, "==========[Tx Phy Info]========\n");
+ seq_puts(m, "[Tx Rate] = ");
+ rtw_print_rate(m, dm_info->tx_rate);
+ seq_printf(m, "(0x%x)\n\n", dm_info->tx_rate);
+
+ seq_puts(m, "==========[Rx Phy Info]========\n");
+ seq_printf(m, "[Rx Beacon Count] = %u\n", last_cnt->num_bcn_pkt);
+ seq_puts(m, "[Rx Rate] = ");
+ rtw_print_rate(m, dm_info->curr_rx_rate);
+ seq_printf(m, "(0x%x)\n", dm_info->curr_rx_rate);
+
+ seq_puts(m, "[Rx Rate Count]:\n");
+ seq_printf(m, " * CCK = {%u, %u, %u, %u}\n",
+ last_cnt->num_qry_pkt[DESC_RATE1M],
+ last_cnt->num_qry_pkt[DESC_RATE2M],
+ last_cnt->num_qry_pkt[DESC_RATE5_5M],
+ last_cnt->num_qry_pkt[DESC_RATE11M]);
+
+ seq_printf(m, " * OFDM = {%u, %u, %u, %u, %u, %u, %u, %u}\n",
+ last_cnt->num_qry_pkt[DESC_RATE6M],
+ last_cnt->num_qry_pkt[DESC_RATE9M],
+ last_cnt->num_qry_pkt[DESC_RATE12M],
+ last_cnt->num_qry_pkt[DESC_RATE18M],
+ last_cnt->num_qry_pkt[DESC_RATE24M],
+ last_cnt->num_qry_pkt[DESC_RATE36M],
+ last_cnt->num_qry_pkt[DESC_RATE48M],
+ last_cnt->num_qry_pkt[DESC_RATE54M]);
+
+ for (ss = 0; ss < efuse->hw_cap.nss; ss++) {
+ rate_id = DESC_RATEMCS0 + ss * 8;
+ seq_printf(m, " * HT_MCS[%u:%u] = {%u, %u, %u, %u, %u, %u, %u, %u}\n",
+ ss * 8, ss * 8 + 7,
+ last_cnt->num_qry_pkt[rate_id],
+ last_cnt->num_qry_pkt[rate_id + 1],
+ last_cnt->num_qry_pkt[rate_id + 2],
+ last_cnt->num_qry_pkt[rate_id + 3],
+ last_cnt->num_qry_pkt[rate_id + 4],
+ last_cnt->num_qry_pkt[rate_id + 5],
+ last_cnt->num_qry_pkt[rate_id + 6],
+ last_cnt->num_qry_pkt[rate_id + 7]);
+ }
+
+ for (ss = 0; ss < efuse->hw_cap.nss; ss++) {
+ rate_id = DESC_RATEVHT1SS_MCS0 + ss * 10;
+ seq_printf(m, " * VHT_MCS-%uss MCS[0:9] = {%u, %u, %u, %u, %u, %u, %u, %u, %u, %u}\n",
+ ss + 1,
+ last_cnt->num_qry_pkt[rate_id],
+ last_cnt->num_qry_pkt[rate_id + 1],
+ last_cnt->num_qry_pkt[rate_id + 2],
+ last_cnt->num_qry_pkt[rate_id + 3],
+ last_cnt->num_qry_pkt[rate_id + 4],
+ last_cnt->num_qry_pkt[rate_id + 5],
+ last_cnt->num_qry_pkt[rate_id + 6],
+ last_cnt->num_qry_pkt[rate_id + 7],
+ last_cnt->num_qry_pkt[rate_id + 8],
+ last_cnt->num_qry_pkt[rate_id + 9]);
+ }
+
+ seq_printf(m, "[RSSI(dBm)] = {%d, %d}\n",
+ dm_info->rssi[RF_PATH_A] - 100,
+ dm_info->rssi[RF_PATH_B] - 100);
+ seq_printf(m, "[Rx EVM(dB)] = {-%d, -%d}\n",
+ dm_info->rx_evm_dbm[RF_PATH_A],
+ dm_info->rx_evm_dbm[RF_PATH_B]);
+ seq_printf(m, "[Rx SNR] = {%d, %d}\n",
+ dm_info->rx_snr[RF_PATH_A],
+ dm_info->rx_snr[RF_PATH_B]);
+ seq_printf(m, "[CFO_tail(KHz)] = {%d, %d}\n",
+ dm_info->cfo_tail[RF_PATH_A],
+ dm_info->cfo_tail[RF_PATH_B]);
+
+ if (dm_info->curr_rx_rate >= DESC_RATE11M) {
+ seq_puts(m, "[Rx Average Status]:\n");
+ seq_printf(m, " * OFDM, EVM: {-%d}, SNR: {%d}\n",
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_OFDM]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_OFDM_A]));
+ seq_printf(m, " * 1SS, EVM: {-%d}, SNR: {%d}\n",
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_1SS]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_1SS_A]));
+ seq_printf(m, " * 2SS, EVM: {-%d, -%d}, SNR: {%d, %d}\n",
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_2SS_A]),
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_2SS_B]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_A]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_B]));
+ }
+
+ seq_puts(m, "[Rx Counter]:\n");
+ seq_printf(m, " * CCA (CCK, OFDM, Total) = (%u, %u, %u)\n",
+ dm_info->cck_cca_cnt,
+ dm_info->ofdm_cca_cnt,
+ dm_info->total_cca_cnt);
+ seq_printf(m, " * False Alarm (CCK, OFDM, Total) = (%u, %u, %u)\n",
+ dm_info->cck_fa_cnt,
+ dm_info->ofdm_fa_cnt,
+ dm_info->total_fa_cnt);
+ seq_printf(m, " * CCK cnt (ok, err) = (%u, %u)\n",
+ dm_info->cck_ok_cnt, dm_info->cck_err_cnt);
+ seq_printf(m, " * OFDM cnt (ok, err) = (%u, %u)\n",
+ dm_info->ofdm_ok_cnt, dm_info->ofdm_err_cnt);
+ seq_printf(m, " * HT cnt (ok, err) = (%u, %u)\n",
+ dm_info->ht_ok_cnt, dm_info->ht_err_cnt);
+ seq_printf(m, " * VHT cnt (ok, err) = (%u, %u)\n",
+ dm_info->vht_ok_cnt, dm_info->vht_err_cnt);
+ return 0;
+}
+
#define rtw_debug_impl_mac(page, addr) \
static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \
.cb_read = rtw_debug_get_mac_page, \
@@ -653,6 +780,10 @@ static struct rtw_debugfs_priv rtw_debug_priv_rsvd_page = {
.cb_read = rtw_debugfs_get_rsvd_page,
};
+static struct rtw_debugfs_priv rtw_debug_priv_phy_info = {
+ .cb_read = rtw_debugfs_get_phy_info,
+};
+
#define rtw_debugfs_add_core(name, mode, fopname, parent) \
do { \
rtw_debug_priv_ ##name.rtwdev = rtwdev; \
@@ -682,6 +813,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_rw(rf_read);
rtw_debugfs_add_rw(dump_cam);
rtw_debugfs_add_rw(rsvd_page);
+ rtw_debugfs_add_r(phy_info);
rtw_debugfs_add_r(mac_0);
rtw_debugfs_add_r(mac_1);
rtw_debugfs_add_r(mac_2);
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index 45851cbbd2ab..cd28f675e9cb 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -16,6 +16,8 @@ enum rtw_debug_mask {
RTW_DBG_RFK = 0x00000080,
RTW_DBG_REGD = 0x00000100,
RTW_DBG_DEBUGFS = 0x00000200,
+ RTW_DBG_PS = 0x00000400,
+ RTW_DBG_BF = 0x00000800,
RTW_DBG_ALL = 0xffffffff
};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index b082e2cc95f5..b8c581161f61 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -7,7 +7,9 @@
#include "fw.h"
#include "tx.h"
#include "reg.h"
+#include "sec.h"
#include "debug.h"
+#include "util.h"
static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
struct sk_buff *skb)
@@ -27,6 +29,100 @@ static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
}
}
+static u16 get_max_amsdu_len(u32 bit_rate)
+{
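+ /* bit_rate comes from cfg80211_calculate_bitrate(), which reports in
+ * units of 100 kbps, so 550 below corresponds to 55 Mbps
+ */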
+ /* legacy CCK/OFDM rates only, do not aggregate */
+ if (bit_rate < 550)
+ return 1;
+
+ /* lower than 20M 2SS MCS8, keep it small */
+ if (bit_rate < 1800)
+ return 1200;
+
+ /* lower than 40M 2SS MCS9, make it medium */
+ if (bit_rate < 4000)
+ return 2600;
+
+ /* below 80M 2SS MCS8/9, make it twice the regular packet size */
+ if (bit_rate < 7000)
+ return 3500;
+
+ /* unlimited */
+ return 0;
+}
+
+struct rtw_fw_iter_ra_data {
+ struct rtw_dev *rtwdev;
+ u8 *payload;
+};
+
+static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_fw_iter_ra_data *ra_data = data;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ u8 mac_id, rate, sgi, bw;
+ u8 mcs, nss;
+ u32 bit_rate;
+
+ mac_id = GET_RA_REPORT_MACID(ra_data->payload);
+ if (si->mac_id != mac_id)
+ return;
+
+ si->ra_report.txrate.flags = 0;
+
+ rate = GET_RA_REPORT_RATE(ra_data->payload);
+ sgi = GET_RA_REPORT_SGI(ra_data->payload);
+ bw = GET_RA_REPORT_BW(ra_data->payload);
+
+ if (rate < DESC_RATEMCS0) {
+ si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate);
+ goto legacy;
+ }
+
+ rtw_desc_to_mcsrate(rate, &mcs, &nss);
+ if (rate >= DESC_RATEVHT1SS_MCS0)
+ si->ra_report.txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
+ else if (rate >= DESC_RATEMCS0)
+ si->ra_report.txrate.flags |= RATE_INFO_FLAGS_MCS;
+
+ if (rate >= DESC_RATEMCS0) {
+ si->ra_report.txrate.mcs = mcs;
+ si->ra_report.txrate.nss = nss;
+ }
+
+ if (sgi)
+ si->ra_report.txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ if (bw == RTW_CHANNEL_WIDTH_80)
+ si->ra_report.txrate.bw = RATE_INFO_BW_80;
+ else if (bw == RTW_CHANNEL_WIDTH_40)
+ si->ra_report.txrate.bw = RATE_INFO_BW_40;
+ else
+ si->ra_report.txrate.bw = RATE_INFO_BW_20;
+
+legacy:
+ bit_rate = cfg80211_calculate_bitrate(&si->ra_report.txrate);
+
+ si->ra_report.desc_rate = rate;
+ si->ra_report.bit_rate = bit_rate;
+
+ sta->max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
+}
+
+static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
+ u8 length)
+{
+ struct rtw_fw_iter_ra_data ra_data;
+
+ if (WARN(length < 7, "invalid ra report c2h length\n"))
+ return;
+
+ rtwdev->dm_info.tx_rate = GET_RA_REPORT_RATE(payload);
+ ra_data.rtwdev = rtwdev;
+ ra_data.payload = payload;
+ rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
+}
+
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
struct rtw_c2h_cmd *c2h;
@@ -49,6 +145,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
case C2H_HALMAC:
rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
break;
+ case C2H_RA_RPT:
+ rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
+ break;
default:
break;
}
@@ -397,6 +496,24 @@ static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
return location;
}
+void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u8 loc_pg, loc_dpk;
+
+ loc_pg = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_INFO);
+ loc_dpk = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_DPK);
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_LPS_PG_INFO);
+
+ LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
+ LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
+ LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
@@ -442,6 +559,58 @@ rtw_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return skb_new;
}
+static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
+ struct rtw_lps_pg_dpk_hdr *dpk_hdr;
+ struct sk_buff *skb;
+ u32 size;
+
+ size = chip->tx_pkt_desc_sz + sizeof(*dpk_hdr);
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, chip->tx_pkt_desc_sz);
+ dpk_hdr = skb_put_zero(skb, sizeof(*dpk_hdr));
+ dpk_hdr->dpk_ch = dpk_info->dpk_ch;
+ dpk_hdr->dpk_path_ok = dpk_info->dpk_path_ok[0];
+ memcpy(dpk_hdr->dpk_txagc, dpk_info->dpk_txagc, 2);
+ memcpy(dpk_hdr->dpk_gs, dpk_info->dpk_gs, 4);
+ memcpy(dpk_hdr->coef, dpk_info->coef, 160);
+
+ return skb;
+}
+
+static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+ struct rtw_lps_pg_info_hdr *pg_info_hdr;
+ struct sk_buff *skb;
+ u32 size;
+
+ size = chip->tx_pkt_desc_sz + sizeof(*pg_info_hdr);
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, chip->tx_pkt_desc_sz);
+ pg_info_hdr = skb_put_zero(skb, sizeof(*pg_info_hdr));
+ pg_info_hdr->tx_bu_page_count = rtwdev->fifo.rsvd_drv_pg_num;
+ pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
+ pg_info_hdr->sec_cam_count =
+ rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);
+
+ conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;
+
+ return skb;
+}
+
static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum rtw_rsvd_packet_type type)
@@ -464,6 +633,12 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
case RSVD_QOS_NULL:
skb_new = ieee80211_nullfunc_get(hw, vif, true);
break;
+ case RSVD_LPS_PG_DPK:
+ skb_new = rtw_lps_pg_dpk_get(hw);
+ break;
+ case RSVD_LPS_PG_INFO:
+ skb_new = rtw_lps_pg_info_get(hw, vif);
+ break;
default:
return NULL;
}
@@ -498,9 +673,6 @@ static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
{
struct sk_buff *skb = rsvd_pkt->skb;
- if (rsvd_pkt->add_txdesc)
- rtw_fill_rsvd_page_desc(rtwdev, skb);
-
if (page >= 1)
memcpy(buf + page_margin + page_size * (page - 1),
skb->data, skb->len);
@@ -625,16 +797,37 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt->type);
if (!iter) {
- rtw_err(rtwdev, "fail to build rsvd packet\n");
+ rtw_err(rtwdev, "failed to build rsvd packet\n");
goto release_skb;
}
+
+ /* Fill the tx_desc for any rsvd pkt that requires one; note that
+ * this also increases iter->len by tx_desc_sz.
+ */
+ if (rsvd_pkt->add_txdesc)
+ rtw_fill_rsvd_page_desc(rtwdev, iter);
+
rsvd_pkt->skb = iter;
rsvd_pkt->page = total_page;
- if (rsvd_pkt->add_txdesc)
+
+ /* Reserved pages are downloaded via the TX path, and the TX path
+ * will generate a tx_desc at the header to describe the length of
+ * the buffer. If we did not count the pages with the size of the
+ * tx_desc added to the first rsvd_pkt (usually a beacon; firmware
+ * by default treats the first page as the beacon content), we could
+ * generate a buffer whose size is smaller than the actual size of
+ * the whole rsvd_page.
+ */
+ if (total_page == 0) {
+ if (rsvd_pkt->type != RSVD_BEACON) {
+ rtw_err(rtwdev, "first page should be a beacon\n");
+ goto release_skb;
+ }
total_page += rtw_len_to_page(iter->len + tx_desc_sz,
page_size);
- else
+ } else {
total_page += rtw_len_to_page(iter->len, page_size);
+ }
}
if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
@@ -647,13 +840,24 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
if (!buf)
goto release_skb;
+ /* Copy the content of each rsvd_pkt to the buf, aligned to full
+ * pages.
+ *
+ * Note that the first rsvd_pkt is always a beacon, no matter what
+ * vif->type is. That rsvd_pkt does not require a tx_desc, because
+ * the TX path will generate one for it when it is transmitted.
+ */
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
page, buf, rsvd_pkt);
- page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
- }
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
+ if (page == 0)
+ page += rtw_len_to_page(rsvd_pkt->skb->len +
+ tx_desc_sz, page_size);
+ else
+ page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
+
kfree_skb(rsvd_pkt->skb);
+ }
return buf;
@@ -706,6 +910,11 @@ int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
goto free;
}
+ /* Finally, download *ONLY* the beacon again, because the previous
+ * tx_desc describes the whole rsvd page. Downloading the beacon
+ * again replaces that TX desc header, leaving a correct tx_desc
+ * for the beacon in the rsvd page.
+ */
ret = rtw_download_beacon(rtwdev, vif);
if (ret) {
rtw_err(rtwdev, "failed to download beacon\n");
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index e95d85bd097f..73d1b9ca8efc 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -11,22 +11,6 @@
/* FW bin information */
#define FW_HDR_SIZE 64
#define FW_HDR_CHKSUM_SIZE 8
-#define FW_HDR_VERSION 4
-#define FW_HDR_SUBVERSION 6
-#define FW_HDR_SUBINDEX 7
-#define FW_HDR_MONTH 16
-#define FW_HDR_DATE 17
-#define FW_HDR_HOUR 18
-#define FW_HDR_MIN 19
-#define FW_HDR_YEAR 20
-#define FW_HDR_MEM_USAGE 24
-#define FW_HDR_H2C_FMT_VER 28
-#define FW_HDR_DMEM_ADDR 32
-#define FW_HDR_DMEM_SIZE 36
-#define FW_HDR_IMEM_SIZE 48
-#define FW_HDR_EMEM_SIZE 52
-#define FW_HDR_EMEM_ADDR 56
-#define FW_HDR_IMEM_ADDR 60
#define FIFO_PAGE_SIZE_SHIFT 12
#define FIFO_PAGE_SIZE 4096
@@ -36,6 +20,7 @@
enum rtw_c2h_cmd_id {
C2H_BT_INFO = 0x09,
C2H_BT_MP_INFO = 0x0b,
+ C2H_RA_RPT = 0x0c,
C2H_HW_FEATURE_REPORT = 0x19,
C2H_WLAN_INFO = 0x27,
C2H_HW_FEATURE_DUMP = 0xfd,
@@ -58,6 +43,8 @@ enum rtw_rsvd_packet_type {
RSVD_PROBE_RESP,
RSVD_NULL,
RSVD_QOS_NULL,
+ RSVD_LPS_PG_DPK,
+ RSVD_LPS_PG_INFO,
};
enum rtw_fw_rf_type {
@@ -86,6 +73,25 @@ struct rtw_iqk_para {
u8 segment_iqk;
};
+struct rtw_lps_pg_dpk_hdr {
+ u16 dpk_path_ok;
+ u8 dpk_txagc[2];
+ u16 dpk_gs[2];
+ u32 coef[2][20];
+ u8 dpk_ch;
+} __packed;
+
+struct rtw_lps_pg_info_hdr {
+ u8 macid;
+ u8 mbssid;
+ u8 pattern_count;
+ u8 mu_tab_group_id;
+ u8 sec_cam_count;
+ u8 tx_bu_page_count;
+ u16 rsvd;
+ u8 sec_cam[MAX_PG_CAM_BACKUP_NUM];
+} __packed;
+
struct rtw_rsvd_page {
struct list_head list;
struct sk_buff *skb;
@@ -94,10 +100,44 @@ struct rtw_rsvd_page {
bool add_txdesc;
};
+struct rtw_fw_hdr {
+ __le16 signature;
+ u8 category;
+ u8 function;
+ __le16 version; /* 0x04 */
+ u8 subversion;
+ u8 subindex;
+ __le32 rsvd; /* 0x08 */
+ __le32 rsvd2; /* 0x0C */
+ u8 month; /* 0x10 */
+ u8 day;
+ u8 hour;
+ u8 min;
+ __le16 year; /* 0x14 */
+ __le16 rsvd3;
+ u8 mem_usage; /* 0x18 */
+ u8 rsvd4[3];
+ __le16 h2c_fmt_ver; /* 0x1C */
+ __le16 rsvd5;
+ __le32 dmem_addr; /* 0x20 */
+ __le32 dmem_size;
+ __le32 rsvd6;
+ __le32 rsvd7;
+ __le32 imem_size; /* 0x30 */
+ __le32 emem_size;
+ __le32 emem_addr;
+ __le32 imem_addr;
+} __packed;
+
/* C2H */
#define GET_CCX_REPORT_SEQNUM(c2h_payload) (c2h_payload[8] & 0xfc)
#define GET_CCX_REPORT_STATUS(c2h_payload) (c2h_payload[9] & 0xc0)
+#define GET_RA_REPORT_RATE(c2h_payload) (c2h_payload[0] & 0x7f)
+#define GET_RA_REPORT_SGI(c2h_payload) ((c2h_payload[0] & 0x80) >> 7)
+#define GET_RA_REPORT_BW(c2h_payload) (c2h_payload[6])
+#define GET_RA_REPORT_MACID(c2h_payload) (c2h_payload[1])
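+/* RA-report c2h payload layout, per the masks above: byte 0 carries the
+ * rate (bits 6:0) and SGI flag (bit 7), byte 1 the mac_id, byte 6 the bw
+ */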
+
/* PKT H2C */
#define H2C_PKT_CMD_ID 0xFF
#define H2C_PKT_CATEGORY 0x01
@@ -146,6 +186,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_RSVD_PAGE 0x0
#define H2C_CMD_MEDIA_STATUS_RPT 0x01
#define H2C_CMD_SET_PWR_MODE 0x20
+#define H2C_CMD_LPS_PG_INFO 0x2b
#define H2C_CMD_RA_INFO 0x40
#define H2C_CMD_RSSI_MONITOR 0x42
@@ -177,6 +218,12 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 5))
#define SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
+#define LPS_PG_INFO_LOC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define LPS_PG_DPK_LOC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define LPS_PG_SEC_CAM_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
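+/* per the masks above, the PG-info and DPK rsvd-page locations occupy
+ * bits 23:16 and 31:24 of the first H2C dword, and the sec-CAM
+ * backup-enable flag bit 8
+ */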
#define SET_RSSI_INFO_MACID(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
#define SET_RSSI_INFO_RSSI(h2c_pkt, value) \
@@ -270,6 +317,7 @@ void rtw_fw_send_phydm_info(struct rtw_dev *rtwdev);
void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para);
void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev);
+void rtw_fw_set_pg_info(struct rtw_dev *rtwdev);
void rtw_fw_query_bt_info(struct rtw_dev *rtwdev);
void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw);
void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index aba329c9d0cf..4afbf0d163d1 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -13,6 +13,7 @@ struct rtw_hci_ops {
int (*setup)(struct rtw_dev *rtwdev);
int (*start)(struct rtw_dev *rtwdev);
void (*stop)(struct rtw_dev *rtwdev);
+ void (*deep_ps)(struct rtw_dev *rtwdev, bool enter);
int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
@@ -47,6 +48,11 @@ static inline void rtw_hci_stop(struct rtw_dev *rtwdev)
rtwdev->hci.ops->stop(rtwdev);
}
+static inline void rtw_hci_deep_ps(struct rtw_dev *rtwdev, bool enter)
+{
+ rtwdev->hci.ops->deep_ps(rtwdev, enter);
+}
+
static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index b61b073031e5..507970387b2a 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -47,7 +47,7 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
value8 = value8 & ~BIT_CHECK_CCK_EN;
- if (channel > 35)
+ if (IS_CH_5G_BAND(channel))
value8 |= BIT_CHECK_CCK_EN;
rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}
@@ -261,7 +261,7 @@ static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);
- rtw_write8(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
rtw_write8(rtwdev, REG_CR_EXT + 3, value8);
@@ -312,15 +312,16 @@ void rtw_mac_power_off(struct rtw_dev *rtwdev)
static bool check_firmware_size(const u8 *data, u32 size)
{
+ const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
u32 dmem_size;
u32 imem_size;
u32 emem_size;
u32 real_size;
- dmem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_SIZE)));
- imem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_SIZE)));
- emem_size = ((*(data + FW_HDR_MEM_USAGE)) & BIT(4)) ?
- le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_SIZE))) : 0;
+ dmem_size = le32_to_cpu(fw_hdr->dmem_size);
+ imem_size = le32_to_cpu(fw_hdr->imem_size);
+ emem_size = (fw_hdr->mem_usage & BIT(4)) ?
+ le32_to_cpu(fw_hdr->emem_size) : 0;
dmem_size += FW_HDR_CHKSUM_SIZE;
imem_size += FW_HDR_CHKSUM_SIZE;
@@ -566,27 +567,10 @@ download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
return 0;
}
-static void update_firmware_info(struct rtw_dev *rtwdev,
- struct rtw_fw_state *fw)
-{
- const u8 *data = fw->firmware->data;
-
- fw->h2c_version =
- le16_to_cpu(*((__le16 *)(data + FW_HDR_H2C_FMT_VER)));
- fw->version =
- le16_to_cpu(*((__le16 *)(data + FW_HDR_VERSION)));
- fw->sub_version = *(data + FW_HDR_SUBVERSION);
- fw->sub_index = *(data + FW_HDR_SUBINDEX);
-
- rtw_dbg(rtwdev, RTW_DBG_FW, "fw h2c version: %x\n", fw->h2c_version);
- rtw_dbg(rtwdev, RTW_DBG_FW, "fw version: %x\n", fw->version);
- rtw_dbg(rtwdev, RTW_DBG_FW, "fw sub version: %x\n", fw->sub_version);
- rtw_dbg(rtwdev, RTW_DBG_FW, "fw sub index: %x\n", fw->sub_index);
-}
-
static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
+ const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
const u8 *cur_fw;
u16 val;
u32 imem_size;
@@ -595,10 +579,10 @@ start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
u32 addr;
int ret;
- dmem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_SIZE)));
- imem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_SIZE)));
- emem_size = ((*(data + FW_HDR_MEM_USAGE)) & BIT(4)) ?
- le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_SIZE))) : 0;
+ dmem_size = le32_to_cpu(fw_hdr->dmem_size);
+ imem_size = le32_to_cpu(fw_hdr->imem_size);
+ emem_size = (fw_hdr->mem_usage & BIT(4)) ?
+ le32_to_cpu(fw_hdr->emem_size) : 0;
dmem_size += FW_HDR_CHKSUM_SIZE;
imem_size += FW_HDR_CHKSUM_SIZE;
emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
@@ -608,14 +592,14 @@ start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
rtw_write16(rtwdev, REG_MCUFW_CTRL, val);
cur_fw = data + FW_HDR_SIZE;
- addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_ADDR)));
+ addr = le32_to_cpu(fw_hdr->dmem_addr);
addr &= ~BIT(31);
ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
if (ret)
return ret;
cur_fw = data + FW_HDR_SIZE + dmem_size;
- addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_ADDR)));
+ addr = le32_to_cpu(fw_hdr->imem_addr);
addr &= ~BIT(31);
ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
if (ret)
@@ -623,7 +607,7 @@ start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
if (emem_size) {
cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
- addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_ADDR)));
+ addr = le32_to_cpu(fw_hdr->emem_addr);
addr &= ~BIT(31);
ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
emem_size);
@@ -699,15 +683,13 @@ int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
if (ret)
goto dlfw_fail;
- update_firmware_info(rtwdev, fw);
-
/* reset desc and index */
rtw_hci_setup(rtwdev);
rtwdev->h2c.last_box_num = 0;
rtwdev->h2c.seq = 0;
- rtw_flag_set(rtwdev, RTW_FLAG_FW_RUNNING);
+ set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);
return 0;
@@ -719,6 +701,93 @@ dlfw_fail:
return ret;
}
+static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
+{
+ struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
+ u32 prio_queues = 0;
+
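+ /* translate mac80211 AC bits into the chip's DMA priority-queue bits */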
+ if (queues & BIT(IEEE80211_AC_VO))
+ prio_queues |= BIT(rqpn->dma_map_vo);
+ if (queues & BIT(IEEE80211_AC_VI))
+ prio_queues |= BIT(rqpn->dma_map_vi);
+ if (queues & BIT(IEEE80211_AC_BE))
+ prio_queues |= BIT(rqpn->dma_map_be);
+ if (queues & BIT(IEEE80211_AC_BK))
+ prio_queues |= BIT(rqpn->dma_map_bk);
+
+ return prio_queues;
+}
+
+static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
+ u32 prio_queue, bool drop)
+{
+ u32 addr;
+ u16 avail_page, rsvd_page;
+ int i;
+
+ switch (prio_queue) {
+ case RTW_DMA_MAPPING_EXTRA:
+ addr = REG_FIFOPAGE_INFO_4;
+ break;
+ case RTW_DMA_MAPPING_LOW:
+ addr = REG_FIFOPAGE_INFO_2;
+ break;
+ case RTW_DMA_MAPPING_NORMAL:
+ addr = REG_FIFOPAGE_INFO_3;
+ break;
+ case RTW_DMA_MAPPING_HIGH:
+ addr = REG_FIFOPAGE_INFO_1;
+ break;
+ default:
+ return;
+ }
+
+ /* poll up to five times, 20 ms apart (~100 ms total), until all of
+ * the reserved pages are available again
+ */
+ for (i = 0; i < 5; i++) {
+ rsvd_page = rtw_read16(rtwdev, addr);
+ avail_page = rtw_read16(rtwdev, addr + 2);
+ if (rsvd_page == avail_page)
+ return;
+
+ msleep(20);
+ }
+
+ /* The priority queue is still not empty; throw a warning.
+ *
+ * Note that flushing the tx queue under heavy traffic (e.g.,
+ * 100 Mbps uplink) may drop some packets, and flushing a full
+ * priority queue takes roughly two seconds.
+ */
+ if (!drop)
+ rtw_warn(rtwdev, "timed out flushing queue %d\n", prio_queue);
+}
+
+static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
+ u32 prio_queues, bool drop)
+{
+ u32 q;
+
+ for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
+ if (prio_queues & BIT(q))
+ __rtw_mac_flush_prio_queue(rtwdev, q, drop);
+}
+
+void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
+{
+ u32 prio_queues = 0;
+
+ /* If all of the hardware queues are requested to be flushed, or the
+ * priority queues have not been mapped yet, flush all of the
+ * priority queues.
+ */
+ if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
+ prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
+ else
+ prio_queues = get_priority_queues(rtwdev, queues);
+
+ rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
+}
+
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -743,6 +812,7 @@ static int txdma_queue_mapping(struct rtw_dev *rtwdev)
return -EINVAL;
}
+ rtwdev->fifo.rqpn = rqpn;
txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h
index efe6f731f240..592dc830160c 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.h
+++ b/drivers/net/wireless/realtek/rtw88/mac.h
@@ -31,5 +31,11 @@ int rtw_mac_power_on(struct rtw_dev *rtwdev);
void rtw_mac_power_off(struct rtw_dev *rtwdev);
int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw);
int rtw_mac_init(struct rtw_dev *rtwdev);
+void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop);
+
+static inline void rtw_mac_flush_all_queues(struct rtw_dev *rtwdev, bool drop)
+{
+ rtw_mac_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, drop);
+}
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index a203b4705b94..34a1c3b53cd4 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -10,6 +10,7 @@
#include "coex.h"
#include "ps.h"
#include "reg.h"
+#include "bf.h"
#include "debug.h"
static void rtw_ops_tx(struct ieee80211_hw *hw,
@@ -17,19 +18,30 @@ static void rtw_ops_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_tx_pkt_info pkt_info = {0};
- if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING))
- goto out;
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags)) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
- rtw_tx_pkt_info_update(rtwdev, &pkt_info, control, skb);
- if (rtw_hci_tx(rtwdev, &pkt_info, skb))
- goto out;
+ rtw_tx(rtwdev, control, skb);
+}
- return;
+static void rtw_ops_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_txq *rtwtxq = (struct rtw_txq *)txq->drv_priv;
-out:
- ieee80211_free_txskb(hw, skb);
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
+ return;
+
+ spin_lock_bh(&rtwdev->txq_lock);
+ if (list_empty(&rtwtxq->list))
+ list_add_tail(&rtwtxq->list, &rtwdev->txqs);
+ spin_unlock_bh(&rtwdev->txq_lock);
+
+ tasklet_schedule(&rtwdev->tx_tasklet);
}
static int rtw_ops_start(struct ieee80211_hw *hw)
@@ -60,6 +72,8 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
if (hw->conf.flags & IEEE80211_CONF_IDLE) {
rtw_enter_ips(rtwdev);
@@ -72,6 +86,15 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
}
}
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (hw->conf.flags & IEEE80211_CONF_PS) {
+ rtwdev->ps_enabled = true;
+ } else {
+ rtwdev->ps_enabled = false;
+ rtw_leave_lps(rtwdev);
+ }
+ }
+
if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
rtw_set_channel(rtwdev);
@@ -135,10 +158,14 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
rtwvif->stats.tx_cnt = 0;
rtwvif->stats.rx_cnt = 0;
rtwvif->in_lps = false;
+ memset(&rtwvif->bfee, 0, sizeof(struct rtw_bfee));
rtwvif->conf = &rtw_vif_port[port];
+ rtw_txq_init(rtwdev, vif->txq);
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
@@ -181,6 +208,10 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
+ rtw_txq_cleanup(rtwdev, vif->txq);
+
eth_zero_addr(rtwvif->mac_addr);
config |= PORT_SET_MAC_ADDR;
rtwvif->net_type = RTW_NET_NO_LINK;
@@ -204,6 +235,8 @@ static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
if (changed_flags & FIF_ALLMULTI) {
if (*new_flags & FIF_ALLMULTI)
rtwdev->hal.rcr |= BIT_AM | BIT_AB;
@@ -238,6 +271,54 @@ static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
+/* Only one group of EDCA parameters exists for now */
+static const u32 ac_to_edca_param[IEEE80211_NUM_ACS] = {
+ [IEEE80211_AC_VO] = REG_EDCA_VO_PARAM,
+ [IEEE80211_AC_VI] = REG_EDCA_VI_PARAM,
+ [IEEE80211_AC_BE] = REG_EDCA_BE_PARAM,
+ [IEEE80211_AC_BK] = REG_EDCA_BK_PARAM,
+};
+
+static u8 rtw_aifsn_to_aifs(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif, u8 aifsn)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ u8 slot_time;
+ u8 sifs;
+
+ slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
+ sifs = rtwdev->hal.current_band_type == RTW_BAND_5G ? 16 : 10;
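+
+ /* AIFS[AC] = AIFSN[AC] * slot_time + SIFS, per IEEE 802.11; e.g.
+ * AIFSN 2 with the short slot (9 us) and 2.4 GHz SIFS (10 us)
+ * yields 28 us
+ */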
+
+ return aifsn * slot_time + sifs;
+}
+
+static void __rtw_conf_tx(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif, u16 ac)
+{
+ struct ieee80211_tx_queue_params *params = &rtwvif->tx_params[ac];
+ u32 edca_param = ac_to_edca_param[ac];
+ u8 ecw_max, ecw_min;
+ u8 aifs;
+
+ /* 2^ecw - 1 = cw; ecw = log2(cw + 1) */
+ ecw_max = ilog2(params->cw_max + 1);
+ ecw_min = ilog2(params->cw_min + 1);
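+ /* e.g. cw_min = 15 gives ecw_min = ilog2(16) = 4 */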
+ aifs = rtw_aifsn_to_aifs(rtwdev, rtwvif, params->aifs);
+ rtw_write32_mask(rtwdev, edca_param, BIT_MASK_TXOP_LMT, params->txop);
+ rtw_write32_mask(rtwdev, edca_param, BIT_MASK_CWMAX, ecw_max);
+ rtw_write32_mask(rtwdev, edca_param, BIT_MASK_CWMIN, ecw_min);
+ rtw_write32_mask(rtwdev, edca_param, BIT_MASK_AIFS, aifs);
+}
+
+static void rtw_conf_tx(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
+{
+ u16 ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ __rtw_conf_tx(rtwdev, rtwvif, ac);
+}
+
static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
@@ -249,6 +330,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
if (changed & BSS_CHANGED_ASSOC) {
struct rtw_chip_info *chip = rtwdev->chip;
enum rtw_net_type net_type;
@@ -262,13 +345,19 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_add_rsvd_page(rtwdev, RSVD_PS_POLL, true);
rtw_add_rsvd_page(rtwdev, RSVD_QOS_NULL, true);
rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_DPK, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_INFO, true);
rtw_fw_download_rsvd_page(rtwdev, vif);
rtw_send_rsvd_page_h2c(rtwdev);
rtw_coex_media_status_notify(rtwdev, conf->assoc);
+ if (rtw_bf_support)
+ rtw_bf_assoc(rtwdev, vif, conf);
} else {
+ rtw_leave_lps(rtwdev);
net_type = RTW_NET_NO_LINK;
rtwvif->aid = 0;
rtw_reset_rsvd_page(rtwdev);
+ rtw_bf_disassoc(rtwdev, vif, conf);
}
rtwvif->net_type = net_type;
@@ -284,11 +373,39 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON)
rtw_fw_download_rsvd_page(rtwdev, vif);
+ if (changed & BSS_CHANGED_MU_GROUPS) {
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->set_gid_table(rtwdev, vif, conf);
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT)
+ rtw_conf_tx(rtwdev, rtwvif);
+
rtw_vif_port_config(rtwdev, rtwvif, config);
mutex_unlock(&rtwdev->mutex);
}
+static int rtw_ops_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+
+ rtw_leave_lps_deep(rtwdev);
+
+ rtwvif->tx_params[ac] = *params;
+ __rtw_conf_tx(rtwdev, rtwvif, ac);
+
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
{
unsigned long mac_id;
@@ -311,6 +428,7 @@ static int rtw_ops_sta_add(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int i;
int ret = 0;
mutex_lock(&rtwdev->mutex);
@@ -325,6 +443,8 @@ static int rtw_ops_sta_add(struct ieee80211_hw *hw,
si->vif = vif;
si->init_ra_lv = 1;
ewma_rssi_init(&si->avg_rssi);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ rtw_txq_init(rtwdev, sta->txq[i]);
rtw_update_sta_info(rtwdev, si);
rtw_fw_media_status_report(rtwdev, si->mac_id, true);
@@ -345,12 +465,18 @@ static int rtw_ops_sta_remove(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int i;
mutex_lock(&rtwdev->mutex);
rtw_release_macid(rtwdev, si->mac_id);
rtw_fw_media_status_report(rtwdev, si->mac_id, false);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ rtw_txq_cleanup(rtwdev, sta->txq[i]);
+
+ kfree(si->mask);
+
rtwdev->sta_cnt--;
rtw_info(rtwdev, "sta %pM with macid %d left\n",
@@ -397,6 +523,8 @@ static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
hw_key_idx = rtw_sec_get_free_cam(sec);
} else {
@@ -418,10 +546,15 @@ static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
hw_key_type, hw_key_idx);
break;
case DISABLE_KEY:
+ rtw_mac_flush_all_queues(rtwdev, false);
rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx);
break;
}
+ /* download the new cam settings for PG mode to back up */
+ if (rtw_fw_lps_deep_mode == LPS_DEEP_MODE_PG)
+ rtw_fw_download_rsvd_page(rtwdev, vif);
+
out:
mutex_unlock(&rtwdev->mutex);
@@ -434,6 +567,8 @@ static int rtw_ops_ampdu_action(struct ieee80211_hw *hw,
{
struct ieee80211_sta *sta = params->sta;
u16 tid = params->tid;
+ struct ieee80211_txq *txq = sta->txq[tid];
+ struct rtw_txq *rtwtxq = (struct rtw_txq *)txq->drv_priv;
switch (params->action) {
case IEEE80211_AMPDU_TX_START:
@@ -441,9 +576,12 @@ static int rtw_ops_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ clear_bit(RTW_TXQ_AMPDU, &rtwtxq->flags);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
+ set_bit(RTW_TXQ_AMPDU, &rtwtxq->flags);
+ break;
case IEEE80211_AMPDU_RX_START:
case IEEE80211_AMPDU_RX_STOP:
break;
@@ -463,18 +601,18 @@ static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
- rtw_leave_lps(rtwdev, rtwvif);
-
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps(rtwdev);
+
ether_addr_copy(rtwvif->mac_addr, mac_addr);
config |= PORT_SET_MAC_ADDR;
rtw_vif_port_config(rtwdev, rtwvif, config);
rtw_coex_scan_notify(rtwdev, COEX_SCAN_START);
- rtw_flag_set(rtwdev, RTW_FLAG_DIG_DISABLE);
- rtw_flag_set(rtwdev, RTW_FLAG_SCANNING);
+ set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
+ set_bit(RTW_FLAG_SCANNING, rtwdev->flags);
mutex_unlock(&rtwdev->mutex);
}
@@ -488,8 +626,8 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
- rtw_flag_clear(rtwdev, RTW_FLAG_SCANNING);
- rtw_flag_clear(rtwdev, RTW_FLAG_DIG_DISABLE);
+ clear_bit(RTW_FLAG_SCANNING, rtwdev->flags);
+ clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
ether_addr_copy(rtwvif->mac_addr, vif->addr);
config |= PORT_SET_MAC_ADDR;
@@ -507,12 +645,99 @@ static void rtw_ops_mgd_prepare_tx(struct ieee80211_hw *hw,
struct rtw_dev *rtwdev = hw->priv;
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_START);
mutex_unlock(&rtwdev->mutex);
}
+static int rtw_ops_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtwdev->rts_threshold = value;
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
+static void rtw_ops_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+
+ sinfo->txrate = si->ra_report.txrate;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+}
+
+static void rtw_ops_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
+ rtw_mac_flush_queues(rtwdev, queues, drop);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+struct rtw_iter_bitrate_mask_data {
+ struct rtw_dev *rtwdev;
+ struct ieee80211_vif *vif;
+ const struct cfg80211_bitrate_mask *mask;
+};
+
+static void rtw_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_iter_bitrate_mask_data *br_data = data;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+
+ if (si->vif != br_data->vif)
+ return;
+
+ /* free previous mask setting */
+ kfree(si->mask);
+ si->mask = kmemdup(br_data->mask, sizeof(struct cfg80211_bitrate_mask),
+ GFP_ATOMIC);
+ if (!si->mask) {
+ si->use_cfg_mask = false;
+ return;
+ }
+
+ si->use_cfg_mask = true;
+ rtw_update_sta_info(br_data->rtwdev, si);
+}
+
+static void rtw_ra_mask_info_update(struct rtw_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct rtw_iter_bitrate_mask_data br_data;
+
+ br_data.rtwdev = rtwdev;
+ br_data.vif = vif;
+ br_data.mask = mask;
+ rtw_iterate_stas_atomic(rtwdev, rtw_ra_mask_info_update_iter, &br_data);
+}
+
+static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ rtw_ra_mask_info_update(rtwdev, vif, mask);
+
+ return 0;
+}
+
const struct ieee80211_ops rtw_ops = {
.tx = rtw_ops_tx,
+ .wake_tx_queue = rtw_ops_wake_tx_queue,
.start = rtw_ops_start,
.stop = rtw_ops_stop,
.config = rtw_ops_config,
@@ -520,6 +745,7 @@ const struct ieee80211_ops rtw_ops = {
.remove_interface = rtw_ops_remove_interface,
.configure_filter = rtw_ops_configure_filter,
.bss_info_changed = rtw_ops_bss_info_changed,
+ .conf_tx = rtw_ops_conf_tx,
.sta_add = rtw_ops_sta_add,
.sta_remove = rtw_ops_sta_remove,
.set_key = rtw_ops_set_key,
@@ -527,5 +753,9 @@ const struct ieee80211_ops rtw_ops = {
.sw_scan_start = rtw_ops_sw_scan_start,
.sw_scan_complete = rtw_ops_sw_scan_complete,
.mgd_prepare_tx = rtw_ops_mgd_prepare_tx,
+ .set_rts_threshold = rtw_ops_set_rts_threshold,
+ .sta_statistics = rtw_ops_sta_statistics,
+ .flush = rtw_ops_flush,
+ .set_bitrate_mask = rtw_ops_set_bitrate_mask,
};
EXPORT_SYMBOL(rtw_ops);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 6dd457741b15..ae61415e1665 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -12,16 +12,22 @@
#include "phy.h"
#include "reg.h"
#include "efuse.h"
+#include "tx.h"
#include "debug.h"
+#include "bf.h"
-static bool rtw_fw_support_lps;
+unsigned int rtw_fw_lps_deep_mode;
+EXPORT_SYMBOL(rtw_fw_lps_deep_mode);
+bool rtw_bf_support = true;
unsigned int rtw_debug_mask;
EXPORT_SYMBOL(rtw_debug_mask);
-module_param_named(support_lps, rtw_fw_support_lps, bool, 0644);
+module_param_named(lps_deep_mode, rtw_fw_lps_deep_mode, uint, 0644);
+module_param_named(support_bf, rtw_bf_support, bool, 0644);
module_param_named(debug_mask, rtw_debug_mask, uint, 0644);
-MODULE_PARM_DESC(support_lps, "Set Y to enable Leisure Power Save support, to turn radio off between beacons");
+MODULE_PARM_DESC(lps_deep_mode, "Deeper PS mode. If 0, deep PS is disabled");
+MODULE_PARM_DESC(support_bf, "Set Y to enable beamformee support");
MODULE_PARM_DESC(debug_mask, "Debugging mask");
static struct ieee80211_channel rtw_channeltable_2g[] = {
@@ -84,6 +90,18 @@ static struct ieee80211_rate rtw_ratetable[] = {
{.bitrate = 540, .hw_value = 0x0b,},
};
+u16 rtw_desc_to_bitrate(u8 desc_rate)
+{
+ struct ieee80211_rate rate;
+
+ if (WARN(desc_rate >= ARRAY_SIZE(rtw_ratetable), "invalid desc rate\n"))
+ return 0;
+
+ rate = rtw_ratetable[desc_rate];
+
+ return rate.bitrate;
+}
+
static struct ieee80211_supported_band rtw_band_2ghz = {
.band = NL80211_BAND_2GHZ,
@@ -112,29 +130,40 @@ static struct ieee80211_supported_band rtw_band_5ghz = {
};
struct rtw_watch_dog_iter_data {
+ struct rtw_dev *rtwdev;
struct rtw_vif *rtwvif;
- bool active;
- u8 assoc_cnt;
};
+static void rtw_dynamic_csi_rate(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+{
+ struct rtw_bf_info *bf_info = &rtwdev->bf_info;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 fix_rate_enable = 0;
+ u8 new_csi_rate_idx;
+
+ if (rtwvif->bfee.role != RTW_BFEE_SU &&
+ rtwvif->bfee.role != RTW_BFEE_MU)
+ return;
+
+ chip->ops->cfg_csi_rate(rtwdev, rtwdev->dm_info.min_rssi,
+ bf_info->cur_csi_rpt_rate,
+ fix_rate_enable, &new_csi_rate_idx);
+
+ if (new_csi_rate_idx != bf_info->cur_csi_rpt_rate)
+ bf_info->cur_csi_rpt_rate = new_csi_rate_idx;
+}
+
static void rtw_vif_watch_dog_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct rtw_watch_dog_iter_data *iter_data = data;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
- if (vif->type == NL80211_IFTYPE_STATION) {
- if (vif->bss_conf.assoc) {
- iter_data->assoc_cnt++;
+ if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->bss_conf.assoc)
iter_data->rtwvif = rtwvif;
- }
- if (rtwvif->stats.tx_cnt > RTW_LPS_THRESHOLD ||
- rtwvif->stats.rx_cnt > RTW_LPS_THRESHOLD)
- iter_data->active = true;
- } else {
- /* only STATION mode can enter lps */
- iter_data->active = true;
- }
+
+ rtw_dynamic_csi_rate(iter_data->rtwdev, rtwvif);
rtwvif->stats.tx_unicast = 0;
rtwvif->stats.rx_unicast = 0;
@@ -149,46 +178,74 @@ static void rtw_watch_dog_work(struct work_struct *work)
{
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
watch_dog_work.work);
+ struct rtw_traffic_stats *stats = &rtwdev->stats;
struct rtw_watch_dog_iter_data data = {};
- bool busy_traffic = rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+ bool busy_traffic = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
+ bool ps_active;
- if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING))
- return;
+ mutex_lock(&rtwdev->mutex);
+
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
+ goto unlock;
ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
RTW_WATCH_DOG_DELAY_TIME);
if (rtwdev->stats.tx_cnt > 100 || rtwdev->stats.rx_cnt > 100)
- rtw_flag_set(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+ set_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
else
- rtw_flag_clear(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+ clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
- if (busy_traffic != rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC))
+ if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags))
rtw_coex_wl_status_change_notify(rtwdev);
+ if (stats->tx_cnt > RTW_LPS_THRESHOLD ||
+ stats->rx_cnt > RTW_LPS_THRESHOLD)
+ ps_active = true;
+ else
+ ps_active = false;
+
+ ewma_tp_add(&stats->tx_ewma_tp,
+ (u32)(stats->tx_unicast >> RTW_TP_SHIFT));
+ ewma_tp_add(&stats->rx_ewma_tp,
+ (u32)(stats->rx_unicast >> RTW_TP_SHIFT));
+ stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp);
+ stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp);
+
/* reset tx/rx statistics */
- rtwdev->stats.tx_unicast = 0;
- rtwdev->stats.rx_unicast = 0;
- rtwdev->stats.tx_cnt = 0;
- rtwdev->stats.rx_cnt = 0;
+ stats->tx_unicast = 0;
+ stats->rx_unicast = 0;
+ stats->tx_cnt = 0;
+ stats->rx_cnt = 0;
+
+ if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ goto unlock;
+
+ /* make sure BB/RF is working for the dynamic mechanisms */
+ rtw_leave_lps(rtwdev);
+
+ rtw_phy_dynamic_mechanism(rtwdev);
+ data.rtwdev = rtwdev;
/* use atomic version to avoid taking local->iflist_mtx mutex */
rtw_iterate_vifs_atomic(rtwdev, rtw_vif_watch_dog_iter, &data);
/* fw supports entering lps with only one associated station; if two
* or more stations are associated to the AP, we cannot enter lps,
* because fw does not handle overlapped beacon intervals
+ *
+ * mac80211 iterates the vifs and determines whether the driver may
+ * enter ps by passing IEEE80211_CONF_PS to us; all we need to do is
+ * get that vif and check whether the device's traffic exceeds the
+ * threshold.
+ */
- if (rtw_fw_support_lps &&
- data.rtwvif && !data.active && data.assoc_cnt == 1)
- rtw_enter_lps(rtwdev, data.rtwvif);
-
- if (rtw_flag_check(rtwdev, RTW_FLAG_SCANNING))
- return;
-
- rtw_phy_dynamic_mechanism(rtwdev);
+ if (rtwdev->ps_enabled && data.rtwvif && !ps_active)
+ rtw_enter_lps(rtwdev, data.rtwvif->port);
rtwdev->watch_dog_cnt++;
+
+unlock:
+ mutex_unlock(&rtwdev->mutex);
}
static void rtw_c2h_work(struct work_struct *work)
@@ -203,6 +260,40 @@ static void rtw_c2h_work(struct work_struct *work)
}
}
+struct rtw_txq_ba_iter_data {
+};
+
+static void rtw_txq_ba_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int ret;
+ u8 tid;
+
+ tid = find_first_bit(si->tid_ba, IEEE80211_NUM_TIDS);
+ while (tid != IEEE80211_NUM_TIDS) {
+ clear_bit(tid, si->tid_ba);
+ ret = ieee80211_start_tx_ba_session(sta, tid, 0);
+ if (ret == -EINVAL) {
+ struct ieee80211_txq *txq;
+ struct rtw_txq *rtwtxq;
+
+ txq = sta->txq[tid];
+ rtwtxq = (struct rtw_txq *)txq->drv_priv;
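+
+ /* -EINVAL is assumed to mean a BA session can never be set up for
+ * this station/TID, so stop requesting one on this txq
+ */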
+ set_bit(RTW_TXQ_BLOCK_BA, &rtwtxq->flags);
+ }
+
+ tid = find_first_bit(si->tid_ba, IEEE80211_NUM_TIDS);
+ }
+}
+
+static void rtw_txq_ba_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ba_work);
+ struct rtw_txq_ba_iter_data data;
+
+ rtw_iterate_stas_atomic(rtwdev, rtw_txq_ba_iter, &data);
+}
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *chan_params)
{
@@ -311,7 +402,7 @@ void rtw_set_channel(struct rtw_dev *rtwdev)
if (hal->current_band_type == RTW_BAND_5G) {
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
} else {
- if (rtw_flag_check(rtwdev, RTW_FLAG_SCANNING))
+ if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G);
else
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G_NOFORSCAN);
@@ -529,12 +620,71 @@ static u8 get_rate_id(u8 wireless_set, enum rtw_bandwidth bw_mode, u8 tx_num)
#define RA_MASK_OFDM_IN_HT_2G 0x00010
#define RA_MASK_OFDM_IN_HT_5G 0x00030
+static u64 rtw_update_rate_mask(struct rtw_dev *rtwdev,
+ struct rtw_sta_info *si,
+ u64 ra_mask, bool is_vht_enable,
+ u8 wireless_set)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ const struct cfg80211_bitrate_mask *mask = si->mask;
+ u64 cfg_mask = GENMASK_ULL(63, 0);
+ u8 rssi_level, band;
+
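+ /* as rssi_level rises, progressively mask off the lowest-rate bits
+ * so rate adaptation stops falling back to the slowest rates
+ */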
+ if (wireless_set != WIRELESS_CCK) {
+ rssi_level = si->rssi_level;
+ if (rssi_level == 0)
+ ra_mask &= 0xffffffffffffffffULL;
+ else if (rssi_level == 1)
+ ra_mask &= 0xfffffffffffffff0ULL;
+ else if (rssi_level == 2)
+ ra_mask &= 0xffffffffffffefe0ULL;
+ else if (rssi_level == 3)
+ ra_mask &= 0xffffffffffffcfc0ULL;
+ else if (rssi_level == 4)
+ ra_mask &= 0xffffffffffff8f80ULL;
+ else if (rssi_level >= 5)
+ ra_mask &= 0xffffffffffff0f00ULL;
+ }
+
+ if (!si->use_cfg_mask)
+ return ra_mask;
+
+ band = hal->current_band_type;
+ if (band == RTW_BAND_2G) {
+ band = NL80211_BAND_2GHZ;
+ cfg_mask = mask->control[band].legacy;
+ } else if (band == RTW_BAND_5G) {
+ band = NL80211_BAND_5GHZ;
+ cfg_mask = u64_encode_bits(mask->control[band].legacy,
+ RA_MASK_OFDM_RATES);
+ }
+
+ if (!is_vht_enable) {
+ if (ra_mask & RA_MASK_HT_RATES_1SS)
+ cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
+ RA_MASK_HT_RATES_1SS);
+ if (ra_mask & RA_MASK_HT_RATES_2SS)
+ cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
+ RA_MASK_HT_RATES_2SS);
+ } else {
+ if (ra_mask & RA_MASK_VHT_RATES_1SS)
+ cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
+ RA_MASK_VHT_RATES_1SS);
+ if (ra_mask & RA_MASK_VHT_RATES_2SS)
+ cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
+ RA_MASK_VHT_RATES_2SS);
+ }
+
+ ra_mask &= cfg_mask;
+
+ return ra_mask;
+}
+
void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
struct ieee80211_sta *sta = si->sta;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_hal *hal = &rtwdev->hal;
- u8 rssi_level;
u8 wireless_set;
u8 bw_mode;
u8 rate_id;
@@ -627,21 +777,8 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
- if (wireless_set != WIRELESS_CCK) {
- rssi_level = si->rssi_level;
- if (rssi_level == 0)
- ra_mask &= 0xffffffffffffffffULL;
- else if (rssi_level == 1)
- ra_mask &= 0xfffffffffffffff0ULL;
- else if (rssi_level == 2)
- ra_mask &= 0xffffffffffffefe0ULL;
- else if (rssi_level == 3)
- ra_mask &= 0xffffffffffffcfc0ULL;
- else if (rssi_level == 4)
- ra_mask &= 0xffffffffffff8f80ULL;
- else if (rssi_level >= 5)
- ra_mask &= 0xffffffffffff0f00ULL;
- }
+ ra_mask = rtw_update_rate_mask(rtwdev, si, ra_mask, is_vht_enable,
+ wireless_set);
si->bw_mode = bw_mode;
si->stbc_en = stbc_en;
@@ -737,7 +874,7 @@ int rtw_core_start(struct rtw_dev *rtwdev)
ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
RTW_WATCH_DOG_DELAY_TIME);
- rtw_flag_set(rtwdev, RTW_FLAG_RUNNING);
+ set_bit(RTW_FLAG_RUNNING, rtwdev->flags);
return 0;
}
@@ -752,8 +889,8 @@ void rtw_core_stop(struct rtw_dev *rtwdev)
{
struct rtw_coex *coex = &rtwdev->coex;
- rtw_flag_clear(rtwdev, RTW_FLAG_RUNNING);
- rtw_flag_clear(rtwdev, RTW_FLAG_FW_RUNNING);
+ clear_bit(RTW_FLAG_RUNNING, rtwdev->flags);
+ clear_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);
cancel_delayed_work_sync(&rtwdev->watch_dog_work);
cancel_delayed_work_sync(&coex->bt_relink_work);
@@ -814,6 +951,12 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
IEEE80211_VHT_CAP_HTC_VHT |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
0;
+
+ vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+ vht_cap->cap |= (rtwdev->hal.bfee_sts_cap <<
+ IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+
mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
@@ -879,12 +1022,25 @@ static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
{
struct rtw_dev *rtwdev = context;
struct rtw_fw_state *fw = &rtwdev->fw;
+ const struct rtw_fw_hdr *fw_hdr;
- if (!firmware)
+ if (!firmware || !firmware->data) {
rtw_err(rtwdev, "failed to request firmware\n");
+ complete_all(&fw->completion);
+ return;
+ }
+
+ fw_hdr = (const struct rtw_fw_hdr *)firmware->data;
+ fw->h2c_version = le16_to_cpu(fw_hdr->h2c_fmt_ver);
+ fw->version = le16_to_cpu(fw_hdr->version);
+ fw->sub_version = fw_hdr->subversion;
+ fw->sub_index = fw_hdr->subindex;
fw->firmware = firmware;
complete_all(&fw->completion);
+
+ rtw_info(rtwdev, "Firmware version %u.%u.%u, H2C version %u\n",
+ fw->version, fw->sub_version, fw->sub_index, fw->h2c_version);
}
static int rtw_load_firmware(struct rtw_dev *rtwdev, const char *fw_name)
@@ -914,6 +1070,7 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
rtwdev->hci.rpwm_addr = 0x03d9;
+ rtwdev->hci.cpwm_addr = 0x03da;
break;
default:
rtw_err(rtwdev, "unsupported hci type\n");
@@ -948,6 +1105,8 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
/* default use ack */
rtwdev->hal.rcr |= BIT_VHT_DACK;
+ hal->bfee_sts_cap = 3;
+
return ret;
}
@@ -1020,7 +1179,8 @@ static int rtw_dump_hw_feature(struct rtw_dev *rtwdev)
rtw_hw_config_rf_ant_num(rtwdev, efuse->hw_cap.ant_num);
- if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE)
+ if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE ||
+ efuse->hw_cap.nss > rtwdev->hal.rf_path_num)
efuse->hw_cap.nss = rtwdev->hal.rf_path_num;
rtw_dbg(rtwdev, RTW_DBG_EFUSE,
@@ -1047,19 +1207,19 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
/* power on mac to read efuse */
ret = rtw_chip_efuse_enable(rtwdev);
if (ret)
- goto out;
+ goto out_unlock;
ret = rtw_parse_efuse_map(rtwdev);
if (ret)
- goto out;
+ goto out_disable;
ret = rtw_dump_hw_feature(rtwdev);
if (ret)
- goto out;
+ goto out_disable;
ret = rtw_check_supported_rfe(rtwdev);
if (ret)
- goto out;
+ goto out_disable;
if (efuse->crystal_cap == 0xff)
efuse->crystal_cap = 0;
@@ -1086,9 +1246,10 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0;
efuse->ext_lna_2g = efuse->lna_type_5g & BIT(3) ? 1 : 0;
+out_disable:
rtw_chip_efuse_disable(rtwdev);
-out:
+out_unlock:
mutex_unlock(&rtwdev->mutex);
return ret;
}
@@ -1141,22 +1302,41 @@ err_out:
}
EXPORT_SYMBOL(rtw_chip_info_setup);
+static void rtw_stats_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_traffic_stats *stats = &rtwdev->stats;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ int i;
+
+ ewma_tp_init(&stats->tx_ewma_tp);
+ ewma_tp_init(&stats->rx_ewma_tp);
+
+ for (i = 0; i < RTW_EVM_NUM; i++)
+ ewma_evm_init(&dm_info->ewma_evm[i]);
+ for (i = 0; i < RTW_SNR_NUM; i++)
+ ewma_snr_init(&dm_info->ewma_snr[i]);
+}
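
rtw_stats_init() relies on the helpers generated by DECLARE_EWMA() in main.h (see DECLARE_EWMA(tp, 10, 2) further below). A self-contained sketch of that API from <linux/average.h>, using a demo name:

#include <linux/average.h>

DECLARE_EWMA(demo, 10, 2);	/* 10 bits of fractional precision, new-sample weight 1/2 */

static unsigned long demo_smooth(void)
{
	struct ewma_demo e;

	ewma_demo_init(&e);
	ewma_demo_add(&e, 100);		/* feed raw samples */
	ewma_demo_add(&e, 200);
	return ewma_demo_read(&e);	/* read the smoothed value */
}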
+
int rtw_core_init(struct rtw_dev *rtwdev)
{
+ struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
int ret;
INIT_LIST_HEAD(&rtwdev->rsvd_page_list);
+ INIT_LIST_HEAD(&rtwdev->txqs);
timer_setup(&rtwdev->tx_report.purge_timer,
rtw_tx_report_purge_timer, 0);
+ tasklet_init(&rtwdev->tx_tasklet, rtw_tx_tasklet,
+ (unsigned long)rtwdev);
INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
- INIT_DELAYED_WORK(&rtwdev->lps_work, rtw_lps_work);
INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
INIT_DELAYED_WORK(&coex->bt_reenable_work, rtw_coex_bt_reenable_work);
INIT_DELAYED_WORK(&coex->defreeze_work, rtw_coex_defreeze_work);
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
+ INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
skb_queue_head_init(&rtwdev->c2h_queue);
skb_queue_head_init(&rtwdev->coex.queue);
skb_queue_head_init(&rtwdev->tx_report.queue);
@@ -1164,6 +1344,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
spin_lock_init(&rtwdev->dm_lock);
spin_lock_init(&rtwdev->rf_lock);
spin_lock_init(&rtwdev->h2c.lock);
+ spin_lock_init(&rtwdev->txq_lock);
spin_lock_init(&rtwdev->tx_report.q_lock);
mutex_init(&rtwdev->mutex);
@@ -1175,11 +1356,17 @@ int rtw_core_init(struct rtw_dev *rtwdev)
rtwdev->sec.total_cam_num = 32;
rtwdev->hal.current_channel = 1;
set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map);
+ if (!(BIT(rtw_fw_lps_deep_mode) & chip->lps_deep_mode_supported))
+ rtwdev->lps_conf.deep_mode = LPS_DEEP_MODE_NONE;
+ else
+ rtwdev->lps_conf.deep_mode = rtw_fw_lps_deep_mode;
mutex_lock(&rtwdev->mutex);
rtw_add_rsvd_page(rtwdev, RSVD_BEACON, false);
mutex_unlock(&rtwdev->mutex);
+ rtw_stats_init(rtwdev);
+
/* default rx filter setting */
rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV |
BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS |
@@ -1204,6 +1391,7 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
if (fw->firmware)
release_firmware(fw->firmware);
+ tasklet_kill(&rtwdev->tx_tasklet);
spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
skb_queue_purge(&rtwdev->tx_report.queue);
spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);
@@ -1229,6 +1417,7 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
hw->extra_tx_headroom = max_tx_headroom;
hw->queues = IEEE80211_NUM_ACS;
+ hw->txq_data_size = sizeof(struct rtw_txq);
hw->sta_data_size = sizeof(struct rtw_sta_info);
hw->vif_data_size = sizeof(struct rtw_vif);
@@ -1241,6 +1430,8 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, TX_AMSDU);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -1252,6 +1443,8 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
+
rtw_set_supported_band(hw, rtwdev->chip);
SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);
@@ -1268,6 +1461,9 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
rtw_debugfs_init(rtwdev);
+ rtwdev->bf_info.bfer_mu_cnt = 0;
+ rtwdev->bf_info.bfer_su_cnt = 0;
+
return 0;
}
EXPORT_SYMBOL(rtw_register_hw);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index bede3f38516e..d012eefcd0da 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -11,11 +11,13 @@
#include <linux/average.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
+#include <linux/interrupt.h>
#include "util.h"
#define RTW_MAX_MAC_ID_NUM 32
#define RTW_MAX_SEC_CAM_NUM 32
+#define MAX_PG_CAM_BACKUP_NUM 8
#define RTW_WATCH_DOG_DELAY_TIME round_jiffies_relative(HZ * 2)
@@ -27,6 +29,10 @@
#define RTW_RF_PATH_MAX 4
#define HW_FEATURE_LEN 13
+#define RTW_TP_SHIFT 18 /* bytes/2s --> Mbps (x8 bits / 2 sec / 2^20 == >>18) */
+
+extern bool rtw_bf_support;
+extern unsigned int rtw_fw_lps_deep_mode;
extern unsigned int rtw_debug_mask;
extern const struct ieee80211_ops rtw_ops;
extern struct rtw_chip_info rtw8822b_hw_spec;
@@ -50,10 +56,24 @@ struct rtw_hci {
enum rtw_hci_type type;
u32 rpwm_addr;
+ u32 cpwm_addr;
u8 bulkout_num;
};
+#define IS_CH_5G_BAND_1(channel) ((channel) >= 36 && (channel) <= 48)
+#define IS_CH_5G_BAND_2(channel) ((channel) >= 52 && (channel) <= 64)
+#define IS_CH_5G_BAND_3(channel) ((channel) >= 100 && (channel) <= 144)
+#define IS_CH_5G_BAND_4(channel) ((channel) >= 149 && (channel) <= 177)
+
+#define IS_CH_5G_BAND_MID(channel) \
+ (IS_CH_5G_BAND_2(channel) || IS_CH_5G_BAND_3(channel))
+
+#define IS_CH_2G_BAND(channel) ((channel) <= 14)
+#define IS_CH_5G_BAND(channel) \
+ (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel) || \
+ IS_CH_5G_BAND_3(channel) || IS_CH_5G_BAND_4(channel))
+
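A sketch of how the channel-to-band helpers above are intended to be used (demo function, not from this patch):

static const char *demo_band_name(u8 channel)
{
	if (IS_CH_2G_BAND(channel))
		return "2.4 GHz";
	if (IS_CH_5G_BAND(channel))
		return "5 GHz";
	return "unknown";	/* channels 15-35 match no band macro */
}
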
enum rtw_supported_band {
RTW_BAND_2G = 1 << 0,
RTW_BAND_5G = 1 << 1,
@@ -303,18 +323,50 @@ enum rtw_regulatory_domains {
RTW_REGD_MAX
};
+enum rtw_txq_flags {
+ RTW_TXQ_AMPDU,
+ RTW_TXQ_BLOCK_BA,
+};
+
enum rtw_flags {
RTW_FLAG_RUNNING,
RTW_FLAG_FW_RUNNING,
RTW_FLAG_SCANNING,
RTW_FLAG_INACTIVE_PS,
RTW_FLAG_LEISURE_PS,
+ RTW_FLAG_LEISURE_PS_DEEP,
RTW_FLAG_DIG_DISABLE,
RTW_FLAG_BUSY_TRAFFIC,
NUM_OF_RTW_FLAGS,
};
+enum rtw_evm {
+ RTW_EVM_OFDM = 0,
+ RTW_EVM_1SS,
+ RTW_EVM_2SS_A,
+ RTW_EVM_2SS_B,
+ /* keep it last */
+ RTW_EVM_NUM
+};
+
+enum rtw_snr {
+ RTW_SNR_OFDM_A = 0,
+ RTW_SNR_OFDM_B,
+ RTW_SNR_OFDM_C,
+ RTW_SNR_OFDM_D,
+ RTW_SNR_1SS_A,
+ RTW_SNR_1SS_B,
+ RTW_SNR_1SS_C,
+ RTW_SNR_1SS_D,
+ RTW_SNR_2SS_A,
+ RTW_SNR_2SS_B,
+ RTW_SNR_2SS_C,
+ RTW_SNR_2SS_D,
+ /* keep it last */
+ RTW_SNR_NUM
+};
+
/* the power index is represented as differences; cck-1s & ht40-1s are
 * the base values, so the 1s differences only cover ht20 & ofdm
 */
@@ -480,6 +532,7 @@ struct rtw_tx_pkt_info {
bool fs;
bool short_gi;
bool report;
+ bool rts;
};
struct rtw_rx_pkt_stat {
@@ -502,10 +555,16 @@ struct rtw_rx_pkt_stat {
s8 rx_power[RTW_RF_PATH_MAX];
u8 rssi;
u8 rxsc;
+ s8 rx_snr[RTW_RF_PATH_MAX];
+ u8 rx_evm[RTW_RF_PATH_MAX];
+ s8 cfo_tail[RTW_RF_PATH_MAX];
+
struct rtw_sta_info *si;
struct ieee80211_vif *vif;
};
+DECLARE_EWMA(tp, 10, 2);
+
struct rtw_traffic_stats {
/* units in bytes */
u64 tx_unicast;
@@ -518,6 +577,8 @@ struct rtw_traffic_stats {
/* units in Mbps */
u32 tx_throughput;
u32 rx_throughput;
+ struct ewma_tp tx_ewma_tp;
+ struct ewma_tp rx_ewma_tp;
};
enum rtw_lps_mode {
@@ -526,6 +587,12 @@ enum rtw_lps_mode {
RTW_MODE_WMM_PS = 2,
};
+enum rtw_lps_deep_mode {
+ LPS_DEEP_MODE_NONE = 0,
+ LPS_DEEP_MODE_LCLK = 1,
+ LPS_DEEP_MODE_PG = 2,
+};
+
enum rtw_pwr_state {
RTW_RF_OFF = 0x0,
RTW_RF_ON = 0x4,
@@ -533,14 +600,14 @@ enum rtw_pwr_state {
};
struct rtw_lps_conf {
- /* the interface to enter lps */
- struct rtw_vif *rtwvif;
enum rtw_lps_mode mode;
+ enum rtw_lps_deep_mode deep_mode;
enum rtw_pwr_state state;
u8 awake_interval;
u8 rlbm;
u8 smart_ps;
u8 port_id;
+ bool sec_cam_backup;
};
enum rtw_hw_key_type {
@@ -576,6 +643,19 @@ struct rtw_tx_report {
struct timer_list purge_timer;
};
+struct rtw_ra_report {
+ struct rate_info txrate;
+ u32 bit_rate;
+ u8 desc_rate;
+};
+
+struct rtw_txq {
+ struct list_head list;
+
+ unsigned long flags;
+ unsigned long last_push;
+};
+
#define RTW_BC_MC_MACID 1
DECLARE_EWMA(rssi, 10, 16);
@@ -598,6 +678,41 @@ struct rtw_sta_info {
bool updated;
u8 init_ra_lv;
u64 ra_mask;
+
+ DECLARE_BITMAP(tid_ba, IEEE80211_NUM_TIDS);
+
+ struct rtw_ra_report ra_report;
+
+ bool use_cfg_mask;
+ struct cfg80211_bitrate_mask *mask;
+};
+
+enum rtw_bfee_role {
+ RTW_BFEE_NONE,
+ RTW_BFEE_SU,
+ RTW_BFEE_MU
+};
+
+struct rtw_bfee {
+ enum rtw_bfee_role role;
+
+ u16 p_aid;
+ u8 g_id;
+ u8 mac_addr[ETH_ALEN];
+ u8 sound_dim;
+
+ /* SU-MIMO */
+ u8 su_reg_index;
+
+ /* MU-MIMO */
+ u16 aid;
+};
+
+struct rtw_bf_info {
+ u8 bfer_mu_cnt;
+ u8 bfer_su_cnt;
+ DECLARE_BITMAP(bfer_su_reg_maping, 2);
+ u8 cur_csi_rpt_rate;
};
struct rtw_vif {
@@ -608,10 +723,13 @@ struct rtw_vif {
u8 bssid[ETH_ALEN];
u8 port;
u8 bcn_ctrl;
+ struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
const struct rtw_vif_port *conf;
struct rtw_traffic_stats stats;
bool in_lps;
+
+ struct rtw_bfee bfee;
};
struct rtw_regulatory {
@@ -643,6 +761,14 @@ struct rtw_chip_ops {
void (*phy_calibration)(struct rtw_dev *rtwdev);
void (*dpk_track)(struct rtw_dev *rtwdev);
void (*cck_pd_set)(struct rtw_dev *rtwdev, u8 level);
+ void (*pwr_track)(struct rtw_dev *rtwdev);
+ void (*config_bfee)(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable);
+ void (*set_gid_table)(struct rtw_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf);
+ void (*cfg_csi_rate)(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
+ u8 fixrate_en, u8 *new_rate);
/* for coex */
void (*coex_set_init)(struct rtw_dev *rtwdev);
@@ -747,6 +873,7 @@ enum rtw_dma_mapping {
RTW_DMA_MAPPING_NORMAL = 2,
RTW_DMA_MAPPING_HIGH = 3,
+ RTW_DMA_MAPPING_MAX,
RTW_DMA_MAPPING_UNDEF,
};
@@ -818,6 +945,34 @@ struct rtw_rfe_def {
.txpwr_lmt_tbl = &rtw ## chip ## _txpwr_lmt_type ## pwrlmt ## _tbl, \
}
+#define RTW_PWR_TRK_5G_1 0
+#define RTW_PWR_TRK_5G_2 1
+#define RTW_PWR_TRK_5G_3 2
+#define RTW_PWR_TRK_5G_NUM 3
+
+#define RTW_PWR_TRK_TBL_SZ 30
+
+/* This table stores the TX power adjustment values applied by power
+ * tracking.
+ *
+ * The 5G bands have 3 different settings; for 2G, the cck rates and the
+ * ofdm rates have separate settings.
+ */
+struct rtw_pwr_track_tbl {
+ const u8 *pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_2gb_n;
+ const u8 *pwrtrk_2gb_p;
+ const u8 *pwrtrk_2ga_n;
+ const u8 *pwrtrk_2ga_p;
+ const u8 *pwrtrk_2g_cckb_n;
+ const u8 *pwrtrk_2g_cckb_p;
+ const u8 *pwrtrk_2g_ccka_n;
+ const u8 *pwrtrk_2g_ccka_p;
+};
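
Each pwrtrk_* array above is indexed by the clamped thermal delta (0 .. RTW_PWR_TRK_TBL_SZ - 1), and the 5G arrays are further selected by RTW_PWR_TRK_5G_1/2/3. A hypothetical lookup helper to illustrate the shape:

static u8 demo_5g_pos_adjust(const struct rtw_pwr_track_tbl *tbl,
			     u8 band_sel, u8 therm_delta)
{
	if (band_sel >= RTW_PWR_TRK_5G_NUM ||
	    therm_delta >= RTW_PWR_TRK_TBL_SZ)
		return 0;

	return tbl->pwrtrk_5ga_p[band_sel][therm_delta];
}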
+
/* hardware configuration for each IC */
struct rtw_chip_info {
struct rtw_chip_ops *ops;
@@ -844,6 +999,7 @@ struct rtw_chip_info {
bool ht_supported;
bool vht_supported;
+ u8 lps_deep_mode_supported;
/* init values */
u8 sys_func_en;
@@ -868,6 +1024,11 @@ struct rtw_chip_info {
bool en_dis_dpd;
u16 dpd_ratemask;
+ u8 iqk_threshold;
+ const struct rtw_pwr_track_tbl *pwr_track_tbl;
+
+ u8 bfer_su_max_num;
+ u8 bfer_mu_max_num;
/* coex paras */
u32 coex_para_ver;
@@ -1121,10 +1282,26 @@ struct rtw_phy_cck_pd_reg {
#define DACK_MSBK_BACKUP_NUM 0xf
#define DACK_DCK_BACKUP_NUM 0x2
+struct rtw_swing_table {
+ const u8 *p[RTW_RF_PATH_MAX];
+ const u8 *n[RTW_RF_PATH_MAX];
+};
+
+struct rtw_pkt_count {
+ u16 num_bcn_pkt;
+ u16 num_qry_pkt[DESC_RATE_MAX];
+};
+
+DECLARE_EWMA(evm, 10, 4);
+DECLARE_EWMA(snr, 10, 4);
+
struct rtw_dm_info {
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
u32 total_fa_cnt;
+ u32 cck_cca_cnt;
+ u32 ofdm_cca_cnt;
+ u32 total_cca_cnt;
u32 cck_ok_cnt;
u32 cck_err_cnt;
@@ -1147,6 +1324,15 @@ struct rtw_dm_info {
u8 cck_gi_u_bnd;
u8 cck_gi_l_bnd;
+ u8 tx_rate;
+ u8 thermal_avg[RTW_RF_PATH_MAX];
+ u8 thermal_meter_k;
+ s8 delta_power_index[RTW_RF_PATH_MAX];
+ u8 default_ofdm_index;
+ bool pwr_trk_triggered;
+ bool pwr_trk_init_trigger;
+ struct ewma_thermal avg_thermal[RTW_RF_PATH_MAX];
+
/* backup dack results for each path and I/Q */
u32 dack_adck[RTW_RF_PATH_MAX];
u16 dack_msbk[RTW_RF_PATH_MAX][2][DACK_MSBK_BACKUP_NUM];
@@ -1157,6 +1343,17 @@ struct rtw_dm_info {
/* [bandwidth 0:20M/1:40M][number of path] */
u8 cck_pd_lv[2][RTW_RF_PATH_MAX];
u32 cck_fa_avg;
+
+ /* save the last rx phy status for debug */
+ s8 rx_snr[RTW_RF_PATH_MAX];
+ u8 rx_evm_dbm[RTW_RF_PATH_MAX];
+ s16 cfo_tail[RTW_RF_PATH_MAX];
+ u8 rssi[RTW_RF_PATH_MAX];
+ u8 curr_rx_rate;
+ struct rtw_pkt_count cur_pkt_count;
+ struct rtw_pkt_count last_pkt_count;
+ struct ewma_evm ewma_evm[RTW_EVM_NUM];
+ struct ewma_snr ewma_snr[RTW_SNR_NUM];
};
struct rtw_efuse {
@@ -1170,7 +1367,9 @@ struct rtw_efuse {
u8 country_code[2];
u8 rf_board_option;
u8 rfe_option;
- u8 thermal_meter;
+ u8 power_track_type;
+ u8 thermal_meter[RTW_RF_PATH_MAX];
+ u8 thermal_meter_k;
u8 crystal_cap;
u8 ant_div_cfg;
u8 ant_div_type;
@@ -1252,7 +1451,7 @@ struct rtw_fifo_conf {
u16 rsvd_cpu_instr_addr;
u16 rsvd_fw_txbuf_addr;
u16 rsvd_csibuf_addr;
- enum rtw_dma_mapping pq_map[RTW_PQ_MAP_NUM];
+ struct rtw_rqpn *rqpn;
};
struct rtw_fw_state {
@@ -1289,6 +1488,7 @@ struct rtw_hal {
u8 rf_path_num;
u8 antenna_tx;
u8 antenna_rx;
+ u8 bfee_sts_cap;
/* protect tx power section */
struct mutex tx_power_mutex;
@@ -1326,6 +1526,7 @@ struct rtw_dev {
struct rtw_sec_desc sec;
struct rtw_traffic_stats stats;
struct rtw_regulatory regd;
+ struct rtw_bf_info bf_info;
struct rtw_dm_info dm_info;
struct rtw_coex coex;
@@ -1349,6 +1550,12 @@ struct rtw_dev {
struct sk_buff_head c2h_queue;
struct work_struct c2h_work;
+ /* used to protect txqs list */
+ spinlock_t txq_lock;
+ struct list_head txqs;
+ struct tasklet_struct tx_tasklet;
+ struct work_struct ba_work;
+
struct rtw_tx_report tx_report;
struct {
@@ -1361,11 +1568,12 @@ struct rtw_dev {
/* lps power state & handler work */
struct rtw_lps_conf lps_conf;
- struct delayed_work lps_work;
+ bool ps_enabled;
struct dentry *debugfs;
u8 sta_cnt;
+ u32 rts_threshold;
DECLARE_BITMAP(mac_id_map, RTW_MAX_MAC_ID_NUM);
DECLARE_BITMAP(flags, NUM_OF_RTW_FLAGS);
@@ -1378,24 +1586,23 @@ struct rtw_dev {
#include "hci.h"
-static inline bool rtw_flag_check(struct rtw_dev *rtwdev, enum rtw_flags flag)
+static inline bool rtw_is_assoc(struct rtw_dev *rtwdev)
{
- return test_bit(flag, rtwdev->flags);
+ return !!rtwdev->sta_cnt;
}
-static inline void rtw_flag_clear(struct rtw_dev *rtwdev, enum rtw_flags flag)
+static inline struct ieee80211_txq *rtwtxq_to_txq(struct rtw_txq *rtwtxq)
{
- clear_bit(flag, rtwdev->flags);
-}
+ void *p = rtwtxq;
-static inline void rtw_flag_set(struct rtw_dev *rtwdev, enum rtw_flags flag)
-{
- set_bit(flag, rtwdev->flags);
+ return container_of(p, struct ieee80211_txq, drv_priv);
}
-static inline bool rtw_is_assoc(struct rtw_dev *rtwdev)
+static inline struct ieee80211_vif *rtwvif_to_vif(struct rtw_vif *rtwvif)
{
- return !!rtwdev->sta_cnt;
+ void *p = rtwvif;
+
+ return container_of(p, struct ieee80211_vif, drv_priv);
}
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
@@ -1405,6 +1612,7 @@ bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val);
bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value);
void rtw_restore_reg(struct rtw_dev *rtwdev,
struct rtw_backup_info *bckp, u32 num);
+void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss);
void rtw_set_channel(struct rtw_dev *rtwdev);
void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
u32 config);
@@ -1417,5 +1625,6 @@ int rtw_core_init(struct rtw_dev *rtwdev);
void rtw_core_deinit(struct rtw_dev *rtwdev);
int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
+u16 rtw_desc_to_bitrate(u8 desc_rate);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index d90928be663b..17b9cdf9cb05 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -9,6 +9,7 @@
#include "tx.h"
#include "rx.h"
#include "fw.h"
+#include "ps.h"
#include "debug.h"
static bool rtw_disable_msi;
@@ -457,9 +458,9 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
/* reset read/write point */
rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
- /* rest H2C Queue index */
- rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX);
- rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HW_IDX);
+ /* reset H2C Queue index in a single write */
+ rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
+ BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}
static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
@@ -536,6 +537,69 @@ static void rtw_pci_stop(struct rtw_dev *rtwdev)
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}
+static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_pci_tx_ring *tx_ring;
+ bool tx_empty = true;
+ u8 queue;
+
+ lockdep_assert_held(&rtwpci->irq_lock);
+
+ /* TX DMA is not allowed while in deep PS state */
+ for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
+ /* BCN queue is rsvd page, does not have DMA interrupt
+ * H2C queue is managed by firmware
+ */
+ if (queue == RTW_TX_QUEUE_BCN ||
+ queue == RTW_TX_QUEUE_H2C)
+ continue;
+
+ tx_ring = &rtwpci->tx_rings[queue];
+
+ /* check if there is any skb DMAing */
+ if (skb_queue_len(&tx_ring->queue)) {
+ tx_empty = false;
+ break;
+ }
+ }
+
+ if (!tx_empty) {
+ rtw_dbg(rtwdev, RTW_DBG_PS,
+ "TX path not empty, cannot enter deep power save state\n");
+ return;
+ }
+
+ set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
+ rtw_power_mode_change(rtwdev, true);
+}
+
+static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ lockdep_assert_held(&rtwpci->irq_lock);
+
+ if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+ rtw_power_mode_change(rtwdev, false);
+}
+
+static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+
+ if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+ rtw_pci_deep_ps_enter(rtwdev);
+
+ if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+ rtw_pci_deep_ps_leave(rtwdev);
+
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
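
rtw_pci_deep_ps() is wired up as the new .deep_ps HCI operation below. The dispatch wrapper is assumed to look roughly like this (the real inline lives in hci.h, which this hunk does not show):

static inline void demo_hci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	if (rtwdev->hci.ops->deep_ps)
		rtwdev->hci.ops->deep_ps(rtwdev, enter);
}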
+
static u8 ac_to_hwq[] = {
[IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
[IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
@@ -616,6 +680,7 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
u8 *pkt_desc;
struct rtw_pci_tx_buffer_desc *buf_desc;
u32 bd_idx;
+ unsigned long flags;
ring = &rtwpci->tx_rings[queue];
@@ -651,6 +716,10 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
tx_data = rtw_pci_get_tx_data(skb);
tx_data->dma = dma;
tx_data->sn = pkt_info->sn;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+
+ rtw_pci_deep_ps_leave(rtwdev);
skb_queue_tail(&ring->queue, skb);
/* kick off tx queue */
@@ -666,6 +735,7 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
}
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
return 0;
}
@@ -1120,8 +1190,6 @@ static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
goto err_io_unmap;
}
- rtw_pci_phy_cfg(rtwdev);
-
return 0;
err_io_unmap:
@@ -1142,6 +1210,7 @@ static struct rtw_hci_ops rtw_pci_ops = {
.setup = rtw_pci_setup,
.start = rtw_pci_start,
.stop = rtw_pci_stop,
+ .deep_ps = rtw_pci_deep_ps,
.read8 = rtw_pci_read8,
.read16 = rtw_pci_read16,
@@ -1233,6 +1302,8 @@ static int rtw_pci_probe(struct pci_dev *pdev,
goto err_destroy_pci;
}
+ rtw_pci_phy_cfg(rtwdev);
+
ret = rtw_register_hw(rtwdev, hw);
if (ret) {
rtw_err(rtwdev, "failed to register hw\n");
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index d3d3f40de75e..a3e1e9578b65 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -20,15 +20,6 @@ union phy_table_tile {
struct phy_cfg_pair cfg;
};
-struct phy_pg_cfg_pair {
- u32 band;
- u32 rf_path;
- u32 tx_num;
- u32 addr;
- u32 bitmask;
- u32 data;
-};
-
static const u32 db_invert_table[12][8] = {
{10, 13, 16, 20,
25, 32, 40, 50},
@@ -118,7 +109,7 @@ static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev)
for (i = 0; i <= RTW_CHANNEL_WIDTH_40; i++) {
for (j = 0; j < RTW_RF_PATH_MAX; j++)
- dm_info->cck_pd_lv[i][j] = 0;
+ dm_info->cck_pd_lv[i][j] = CCK_PD_LV0;
}
dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
@@ -222,10 +213,19 @@ static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
dm_info->min_rssi = data.min_rssi;
}
+static void rtw_phy_stat_rate_cnt(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ dm_info->last_pkt_count = dm_info->cur_pkt_count;
+ memset(&dm_info->cur_pkt_count, 0, sizeof(dm_info->cur_pkt_count));
+}
+
static void rtw_phy_statistics(struct rtw_dev *rtwdev)
{
rtw_phy_stat_rssi(rtwdev);
rtw_phy_stat_false_alarm(rtwdev);
+ rtw_phy_stat_rate_cnt(rtwdev);
}
#define DIG_PERF_FA_TH_LOW 250
@@ -394,7 +394,7 @@ static void rtw_phy_dig(struct rtw_dev *rtwdev)
u8 step[3];
bool linked;
- if (rtw_flag_check(rtwdev, RTW_FLAG_DIG_DISABLE))
+ if (test_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags))
return;
if (rtw_phy_dig_check_damping(dm_info))
@@ -461,7 +461,6 @@ static void rtw_phy_dpk_track(struct rtw_dev *rtwdev)
chip->ops->dpk_track(rtwdev);
}
-#define CCK_PD_LV_MAX 5
#define CCK_PD_FA_LV1_MIN 1000
#define CCK_PD_FA_LV0_MAX 500
@@ -471,10 +470,10 @@ static u8 rtw_phy_cck_pd_lv_unlink(struct rtw_dev *rtwdev)
u32 cck_fa_avg = dm_info->cck_fa_avg;
if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
- return 1;
+ return CCK_PD_LV1;
if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
- return 0;
+ return CCK_PD_LV0;
return CCK_PD_LV_MAX;
}
@@ -494,15 +493,15 @@ static u8 rtw_phy_cck_pd_lv_link(struct rtw_dev *rtwdev)
u32 cck_fa_avg = dm_info->cck_fa_avg;
if (igi > CCK_PD_IGI_LV4_VAL && rssi > CCK_PD_RSSI_LV4_VAL)
- return 4;
+ return CCK_PD_LV4;
if (igi > CCK_PD_IGI_LV3_VAL && rssi > CCK_PD_RSSI_LV3_VAL)
- return 3;
+ return CCK_PD_LV3;
if (igi > CCK_PD_IGI_LV2_VAL || rssi > CCK_PD_RSSI_LV2_VAL)
- return 2;
+ return CCK_PD_LV2;
if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
- return 1;
+ return CCK_PD_LV1;
if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
- return 0;
+ return CCK_PD_LV0;
return CCK_PD_LV_MAX;
}
@@ -539,6 +538,11 @@ static void rtw_phy_cck_pd(struct rtw_dev *rtwdev)
chip->ops->cck_pd_set(rtwdev, level);
}
+static void rtw_phy_pwr_track(struct rtw_dev *rtwdev)
+{
+ rtwdev->chip->ops->pwr_track(rtwdev);
+}
+
void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
{
/* for further calculation */
@@ -547,6 +551,7 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
rtw_phy_cck_pd(rtwdev);
rtw_phy_ra_info_update(rtwdev);
rtw_phy_dpk_track(rtwdev);
+ rtw_phy_pwr_track(rtwdev);
}
#define FRAC_BITS 3
@@ -1211,10 +1216,8 @@ static void rtw_phy_store_tx_power_by_rate(struct rtw_dev *rtwdev,
void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
- const struct phy_pg_cfg_pair *p = tbl->data;
- const struct phy_pg_cfg_pair *end = p + tbl->size / 6;
-
- BUILD_BUG_ON(sizeof(struct phy_pg_cfg_pair) != sizeof(u32) * 6);
+ const struct rtw_phy_pg_cfg_pair *p = tbl->data;
+ const struct rtw_phy_pg_cfg_pair *end = p + tbl->size;
for (; p < end; p++) {
if (p->addr == 0xfe || p->addr == 0xffe) {
@@ -1748,7 +1751,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
group = rtw_get_channel_group(ch);
/* base power index for 2.4G/5G */
- if (ch <= 14) {
+ if (IS_CH_2G_BAND(ch)) {
band = PHY_BAND_2G;
*base = rtw_phy_get_2g_tx_power_index(rtwdev,
&pwr_idx->pwr_idx_2g,
@@ -1968,3 +1971,123 @@ void rtw_phy_init_tx_power(struct rtw_dev *rtwdev)
rtw_phy_init_tx_power_limit(rtwdev, regd, bw,
rs);
}
+
+void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table)
+{
+ const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl;
+ u8 channel = rtwdev->hal.current_channel;
+
+ if (IS_CH_2G_BAND(channel)) {
+ if (rtwdev->dm_info.tx_rate <= DESC_RATE11M) {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_2g_ccka_p;
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_2g_ccka_n;
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_2g_cckb_p;
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_2g_cckb_n;
+ } else {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
+ }
+ } else if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_1];
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_1];
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_1];
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_1];
+ } else if (IS_CH_5G_BAND_3(channel)) {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_2];
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_2];
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_2];
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_2];
+ } else if (IS_CH_5G_BAND_4(channel)) {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_3];
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_3];
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_3];
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_3];
+ } else {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
+ }
+}
+
+void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ ewma_thermal_add(&dm_info->avg_thermal[path], thermal);
+ dm_info->thermal_avg[path] =
+ ewma_thermal_read(&dm_info->avg_thermal[path]);
+}
+
+bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal,
+ u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 avg = ewma_thermal_read(&dm_info->avg_thermal[path]);
+
+ return avg != thermal;
+}
+
+u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 therm_avg, therm_efuse, therm_delta;
+
+ therm_avg = dm_info->thermal_avg[path];
+ therm_efuse = rtwdev->efuse.thermal_meter[path];
+ therm_delta = abs(therm_avg - therm_efuse);
+
+ return min_t(u8, therm_delta, RTW_PWR_TRK_TBL_SZ - 1);
+}
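
A worked example for the delta computation above, with hypothetical readings:

static u8 demo_therm_delta(void)
{
	u8 avg = 40, efuse = 32;	/* hypothetical thermal values */

	/* abs(40 - 32) = 8; an avg of 70 would clamp to 29 */
	return min_t(u8, abs(avg - efuse), RTW_PWR_TRK_TBL_SZ - 1);
}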
+
+s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 tbl_path, u8 therm_path, u8 delta)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ const u8 *delta_swing_table_idx_pos;
+ const u8 *delta_swing_table_idx_neg;
+
+ if (delta >= RTW_PWR_TRK_TBL_SZ) {
+ rtw_warn(rtwdev, "power track table overflow\n");
+ return 0;
+ }
+
+ if (!swing_table) {
+ rtw_warn(rtwdev, "swing table not configured\n");
+ return 0;
+ }
+
+ delta_swing_table_idx_pos = swing_table->p[tbl_path];
+ delta_swing_table_idx_neg = swing_table->n[tbl_path];
+
+ if (!delta_swing_table_idx_pos || !delta_swing_table_idx_neg) {
+ rtw_warn(rtwdev, "invalid swing table index\n");
+ return 0;
+ }
+
+ if (dm_info->thermal_avg[therm_path] >
+ rtwdev->efuse.thermal_meter[therm_path])
+ return delta_swing_table_idx_pos[delta];
+ else
+ return -delta_swing_table_idx_neg[delta];
+}
+
+bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 delta_iqk;
+
+ delta_iqk = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_k);
+ if (delta_iqk >= rtwdev->chip->iqk_threshold) {
+ dm_info->thermal_meter_k = dm_info->thermal_avg[0];
+ return true;
+ }
+ return false;
+}
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index e79b084628e7..af916d8784cd 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -41,9 +41,21 @@ void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data);
void rtw_phy_init_tx_power(struct rtw_dev *rtwdev);
void rtw_phy_load_tables(struct rtw_dev *rtwdev);
+u8 rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate,
+ enum rtw_bandwidth bw, u8 channel, u8 regd);
void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel);
void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal);
void rtw_phy_tx_power_limit_config(struct rtw_hal *hal);
+void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path);
+bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal,
+ u8 path);
+u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path);
+s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 tbl_path, u8 therm_path, u8 delta);
+bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev);
+void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table);
struct rtw_txpwr_lmt_cfg_pair {
u8 regd;
@@ -54,6 +66,15 @@ struct rtw_txpwr_lmt_cfg_pair {
s8 txpwr_lmt;
};
+struct rtw_phy_pg_cfg_pair {
+ u32 band;
+ u32 rf_path;
+ u32 tx_num;
+ u32 addr;
+ u32 bitmask;
+ u32 data;
+};
+
#define RTW_DECL_TABLE_PHY_COND_CORE(name, cfg, path) \
const struct rtw_table name ## _tbl = { \
.data = name, \
@@ -125,6 +146,15 @@ rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path,
u8 rate, u8 bw, u8 ch, u8 regd,
struct rtw_power_params *pwr_param);
+enum rtw_phy_cck_pd_lv {
+ CCK_PD_LV0,
+ CCK_PD_LV1,
+ CCK_PD_LV2,
+ CCK_PD_LV3,
+ CCK_PD_LV4,
+ CCK_PD_LV_MAX,
+};
+
#define MASKBYTE0 0xff
#define MASKBYTE1 0xff00
#define MASKBYTE2 0xff0000
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index 9ecd14feb76b..2226e3e7d7f8 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -3,6 +3,7 @@
*/
#include "main.h"
+#include "reg.h"
#include "fw.h"
#include "ps.h"
#include "mac.h"
@@ -18,14 +19,14 @@ static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
rtw_err(rtwdev, "leave idle state failed\n");
rtw_set_channel(rtwdev);
- rtw_flag_clear(rtwdev, RTW_FLAG_INACTIVE_PS);
+ clear_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags);
return ret;
}
int rtw_enter_ips(struct rtw_dev *rtwdev)
{
- rtw_flag_set(rtwdev, RTW_FLAG_INACTIVE_PS);
+ set_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags);
rtw_coex_ips_notify(rtwdev, COEX_IPS_ENTER);
@@ -61,6 +62,85 @@ int rtw_leave_ips(struct rtw_dev *rtwdev)
return 0;
}
+void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter)
+{
+ u8 request, confirm, polling;
+ u8 polling_cnt;
+ u8 retry_cnt = 0;
+
+ for (retry_cnt = 0; retry_cnt < 3; retry_cnt++) {
+ request = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
+ confirm = rtw_read8(rtwdev, rtwdev->hci.cpwm_addr);
+
+ /* toggle to request power mode, others remain 0 */
+ request ^= request | BIT_RPWM_TOGGLE;
+ if (!enter) {
+ request |= POWER_MODE_ACK;
+ } else {
+ request |= POWER_MODE_LCLK;
+ if (rtw_fw_lps_deep_mode == LPS_DEEP_MODE_PG)
+ request |= POWER_MODE_PG;
+ }
+
+ rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, request);
+
+ if (enter)
+ return;
+
+ /* check that the confirmed power mode has left power save state */
+ for (polling_cnt = 0; polling_cnt < 3; polling_cnt++) {
+ polling = rtw_read8(rtwdev, rtwdev->hci.cpwm_addr);
+ if ((polling ^ confirm) & BIT_RPWM_TOGGLE)
+ return;
+ mdelay(20);
+ }
+
+ /* in case of fw/hw missed the request, retry */
+ rtw_warn(rtwdev, "failed to leave deep PS, retry=%d\n",
+ retry_cnt);
+ }
+
+ /* Reaching here means the driver failed to change the hardware power
+ * mode to active state after retrying 3 times. If the power state is
+ * locked in deep sleep, most of the hardware circuits are not working,
+ * not even register read/write. Treat it as a fatal error that
+ * requires a full analysis of the firmware/hardware
+ */
+ WARN(1, "Hardware power state locked\n");
+}
+EXPORT_SYMBOL(rtw_power_mode_change);
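
rtw_power_mode_change() implements a toggle-bit handshake: the driver flips BIT_RPWM_TOGGLE in the RPWM register, and the firmware mirrors the flip into CPWM once the transition completes. The acknowledgement test reduces to this generic form (demo helper, not driver code):

static bool demo_toggle_acked(u8 cpwm_before, u8 cpwm_now, u8 toggle_bit)
{
	/* acked once the toggle bit in CPWM has changed state */
	return (cpwm_before ^ cpwm_now) & toggle_bit;
}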
+
+static void __rtw_leave_lps_deep(struct rtw_dev *rtwdev)
+{
+ rtw_hci_deep_ps(rtwdev, false);
+}
+
+static void rtw_fw_leave_lps_state_check(struct rtw_dev *rtwdev)
+{
+ int i;
+
+ /* The driver needs to wait for the firmware to leave LPS state
+ * successfully. The firmware sends a null packet to inform the AP and,
+ * once the AP ACKs it, restores the REG_TCR register.
+ *
+ * If the driver does not wait for the firmware, a null packet with the
+ * PS bit could be sent under an incorrect REG_TCR setting.
+ *
+ * In our tests, 100ms is enough for the firmware to finish the flow.
+ * If the REG_TCR register is still incorrect after 100ms, modify it
+ * directly and emit a warning message.
+ */
+ for (i = 0; i < LEAVE_LPS_TRY_CNT; i++) {
+ if (rtw_read32_mask(rtwdev, REG_TCR, BIT_PWRMGT_HWDATA_EN) == 0)
+ return;
+ msleep(20);
+ }
+
+ rtw_write32_mask(rtwdev, REG_TCR, BIT_PWRMGT_HWDATA_EN, 0);
+ rtw_warn(rtwdev, "firmware failed to restore hardware setting\n");
+}
+
static void rtw_leave_lps_core(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
@@ -71,11 +151,30 @@ static void rtw_leave_lps_core(struct rtw_dev *rtwdev)
conf->smart_ps = 0;
rtw_fw_set_pwr_mode(rtwdev);
- rtw_flag_clear(rtwdev, RTW_FLAG_LEISURE_PS);
+ rtw_fw_leave_lps_state_check(rtwdev);
+
+ clear_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags);
rtw_coex_lps_notify(rtwdev, COEX_LPS_DISABLE);
}
+static void __rtw_enter_lps_deep(struct rtw_dev *rtwdev)
+{
+ if (rtwdev->lps_conf.deep_mode == LPS_DEEP_MODE_NONE)
+ return;
+
+ if (!test_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags)) {
+ rtw_dbg(rtwdev, RTW_DBG_PS,
+ "Should enter LPS before entering deep PS\n");
+ return;
+ }
+
+ if (rtw_fw_lps_deep_mode == LPS_DEEP_MODE_PG)
+ rtw_fw_set_pg_info(rtwdev);
+
+ rtw_hci_deep_ps(rtwdev, true);
+}
+
static void rtw_enter_lps_core(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
@@ -88,88 +187,62 @@ static void rtw_enter_lps_core(struct rtw_dev *rtwdev)
rtw_coex_lps_notify(rtwdev, COEX_LPS_ENABLE);
rtw_fw_set_pwr_mode(rtwdev);
- rtw_flag_set(rtwdev, RTW_FLAG_LEISURE_PS);
+ set_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags);
}
-void rtw_lps_work(struct work_struct *work)
+static void __rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id)
{
- struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
- lps_work.work);
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
- struct rtw_vif *rtwvif = conf->rtwvif;
- if (WARN_ON(!rtwvif))
- return;
-
- if (conf->mode == RTW_MODE_LPS)
- rtw_enter_lps_core(rtwdev);
- else
- rtw_leave_lps_core(rtwdev);
-}
-
-void rtw_enter_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
-{
- struct rtw_lps_conf *conf = &rtwdev->lps_conf;
-
- if (rtwvif->in_lps)
+ if (test_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags))
return;
conf->mode = RTW_MODE_LPS;
- conf->rtwvif = rtwvif;
- rtwvif->in_lps = true;
+ conf->port_id = port_id;
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->lps_work, 0);
+ rtw_enter_lps_core(rtwdev);
}
-void rtw_leave_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+static void __rtw_leave_lps(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
- if (!rtwvif->in_lps)
+ if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) {
+ rtw_dbg(rtwdev, RTW_DBG_PS,
+ "Should leave deep PS before leaving LPS\n");
+ __rtw_leave_lps_deep(rtwdev);
+ }
+
+ if (!test_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags))
return;
conf->mode = RTW_MODE_ACTIVE;
- conf->rtwvif = rtwvif;
- rtwvif->in_lps = false;
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->lps_work, 0);
-}
-
-bool rtw_in_lps(struct rtw_dev *rtwdev)
-{
- return rtw_flag_check(rtwdev, RTW_FLAG_LEISURE_PS);
+ rtw_leave_lps_core(rtwdev);
}
-void rtw_enter_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+void rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id)
{
- struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+ lockdep_assert_held(&rtwdev->mutex);
- if (WARN_ON(!rtwvif))
+ if (rtwdev->coex.stat.wl_force_lps_ctrl)
return;
- if (rtwvif->in_lps)
- return;
-
- conf->mode = RTW_MODE_LPS;
- conf->rtwvif = rtwvif;
- rtwvif->in_lps = true;
-
- rtw_enter_lps_core(rtwdev);
+ __rtw_enter_lps(rtwdev, port_id);
+ __rtw_enter_lps_deep(rtwdev);
}
-void rtw_leave_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+void rtw_leave_lps(struct rtw_dev *rtwdev)
{
- struct rtw_lps_conf *conf = &rtwdev->lps_conf;
-
- if (WARN_ON(!rtwvif))
- return;
+ lockdep_assert_held(&rtwdev->mutex);
- if (!rtwvif->in_lps)
- return;
+ __rtw_leave_lps_deep(rtwdev);
+ __rtw_leave_lps(rtwdev);
+}
- conf->mode = RTW_MODE_ACTIVE;
- conf->rtwvif = rtwvif;
- rtwvif->in_lps = false;
+void rtw_leave_lps_deep(struct rtw_dev *rtwdev)
+{
+ lockdep_assert_held(&rtwdev->mutex);
- rtw_leave_lps_core(rtwdev);
+ __rtw_leave_lps_deep(rtwdev);
}
diff --git a/drivers/net/wireless/realtek/rtw88/ps.h b/drivers/net/wireless/realtek/rtw88/ps.h
index 09e57405dc1b..19afceca7d0e 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.h
+++ b/drivers/net/wireless/realtek/rtw88/ps.h
@@ -5,16 +5,20 @@
#ifndef __RTW_PS_H_
#define __RTW_PS_H_
-#define RTW_LPS_THRESHOLD 2
+#define RTW_LPS_THRESHOLD 50
+
+#define POWER_MODE_ACK BIT(6)
+#define POWER_MODE_PG BIT(4)
+#define POWER_MODE_LCLK BIT(0)
+
+#define LEAVE_LPS_TRY_CNT 5
int rtw_enter_ips(struct rtw_dev *rtwdev);
int rtw_leave_ips(struct rtw_dev *rtwdev);
-void rtw_lps_work(struct work_struct *work);
-void rtw_enter_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
-void rtw_leave_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
-void rtw_enter_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
-void rtw_leave_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
-bool rtw_in_lps(struct rtw_dev *rtwdev);
+void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter);
+void rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id);
+void rtw_leave_lps(struct rtw_dev *rtwdev);
+void rtw_leave_lps_deep(struct rtw_dev *rtwdev);
#endif
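
Both new LPS entry points assert rtwdev->mutex, so a hypothetical caller follows this shape:

static void demo_ps_cycle(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
{
	mutex_lock(&rtwdev->mutex);
	rtw_enter_lps(rtwdev, rtwvif->port);	/* a port id now replaces the vif argument */
	/* ... link stays idle ... */
	rtw_leave_lps(rtwdev);
	mutex_unlock(&rtwdev->mutex);
}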
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index fe793e270d22..7e817bc997eb 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -239,6 +239,10 @@
#define REG_EDCA_VI_PARAM 0x0504
#define REG_EDCA_BE_PARAM 0x0508
#define REG_EDCA_BK_PARAM 0x050C
+#define BIT_MASK_TXOP_LMT GENMASK(26, 16)
+#define BIT_MASK_CWMAX GENMASK(15, 12)
+#define BIT_MASK_CWMIN GENMASK(11, 8)
+#define BIT_MASK_AIFS GENMASK(7, 0)
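+
The masks above pack one 32-bit EDCA parameter register; with <linux/bitfield.h> a write could look like this sketch (demo values, not from this patch):

static void demo_set_edca_be(struct rtw_dev *rtwdev)
{
	u32 edca = FIELD_PREP(BIT_MASK_TXOP_LMT, 0x2f) |
		   FIELD_PREP(BIT_MASK_CWMAX, 10) |
		   FIELD_PREP(BIT_MASK_CWMIN, 4) |
		   FIELD_PREP(BIT_MASK_AIFS, 0x2b);

	rtw_write32(rtwdev, REG_EDCA_BE_PARAM, edca);
}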
#define REG_PIFS 0x0512
#define REG_SIFS 0x0514
#define BIT_SHIFT_SIFS_OFDM_CTX 8
@@ -271,6 +275,7 @@
#define BIT_TSFT_SEL_TIMER0 (BIT(4) | BIT(5) | BIT(6))
#define REG_TCR 0x0604
+#define BIT_PWRMGT_HWDATA_EN BIT(7)
#define REG_RCR 0x0608
#define BIT_APP_FCS BIT(31)
#define BIT_APP_MIC BIT(30)
@@ -305,6 +310,7 @@
#define REG_RX_PKT_LIMIT 0x060C
#define REG_RX_DRVINFO_SZ 0x060F
#define BIT_APP_PHYSTS BIT(28)
+#define REG_MAR 0x0620
#define REG_USTIME_EDCA 0x0638
#define REG_ACKTO_CCK 0x0639
#define REG_RESP_SIFS_CCK 0x063C
@@ -320,6 +326,7 @@
#define REG_RXFLTMAP0 0x06A0
#define REG_RXFLTMAP1 0x06A2
#define REG_RXFLTMAP2 0x06A4
+#define REG_RXFLTMAP4 0x068A
#define REG_BT_COEX_TABLE0 0x06C0
#define REG_BT_COEX_TABLE1 0x06C4
#define REG_BT_COEX_BRK_TABLE 0x06C8
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 63abda3b0ebf..4bc14b1a6340 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -13,6 +13,7 @@
#include "mac.h"
#include "reg.h"
#include "debug.h"
+#include "bf.h"
static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
@@ -43,6 +44,8 @@ static int rtw8822b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
efuse->country_code[1] = map->country_code[1];
efuse->bt_setting = map->rf_bt_setting;
efuse->regd = map->rf_board_option & 0x7;
+ efuse->thermal_meter[RF_PATH_A] = map->thermal_meter;
+ efuse->thermal_meter_k = map->thermal_meter;
for (i = 0; i < 4; i++)
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
@@ -75,6 +78,56 @@ static void rtw8822b_phy_rfe_init(struct rtw_dev *rtwdev)
rtw_write32_mask(rtwdev, 0x974, (BIT(11) | BIT(10)), 0x3);
}
+#define RTW_TXSCALE_SIZE 37
+static const u32 rtw8822b_txscale_tbl[RTW_TXSCALE_SIZE] = {
+ 0x081, 0x088, 0x090, 0x099, 0x0a2, 0x0ac, 0x0b6, 0x0c0, 0x0cc, 0x0d8,
+ 0x0e5, 0x0f2, 0x101, 0x110, 0x120, 0x131, 0x143, 0x156, 0x16a, 0x180,
+ 0x197, 0x1af, 0x1c8, 0x1e3, 0x200, 0x21e, 0x23e, 0x261, 0x285, 0x2ab,
+ 0x2d3, 0x2fe, 0x32b, 0x35c, 0x38e, 0x3c4, 0x3fe
+};
+
+static u8 rtw8822b_get_swing_index(struct rtw_dev *rtwdev)
+{
+ u8 i = 0;
+ u32 swing, table_value;
+
+ swing = rtw_read32_mask(rtwdev, 0xc1c, 0xffe00000);
+ for (i = 0; i < RTW_TXSCALE_SIZE; i++) {
+ table_value = rtw8822b_txscale_tbl[i];
+ if (swing == table_value)
+ break;
+ }
+
+ return i;
+}
+
+static void rtw8822b_pwrtrack_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 swing_idx = rtw8822b_get_swing_index(rtwdev);
+ u8 path;
+
+ if (swing_idx >= RTW_TXSCALE_SIZE)
+ dm_info->default_ofdm_index = 24;
+ else
+ dm_info->default_ofdm_index = swing_idx;
+
+ for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) {
+ ewma_thermal_init(&dm_info->avg_thermal[path]);
+ dm_info->delta_power_index[path] = 0;
+ }
+ dm_info->pwr_trk_triggered = false;
+ dm_info->pwr_trk_init_trigger = true;
+ dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
+}
+
+static void rtw8822b_phy_bf_init(struct rtw_dev *rtwdev)
+{
+ rtw_bf_phy_init(rtwdev);
+ /* Grouping bitmap parameters */
+ rtw_write32(rtwdev, 0x1C94, 0xAFFFAFFF);
+}
+
static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev)
{
struct rtw_hal *hal = &rtwdev->hal;
@@ -106,6 +159,9 @@ static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev)
rtw_phy_init(rtwdev);
rtw8822b_phy_rfe_init(rtwdev);
+ rtw8822b_pwrtrack_init(rtwdev);
+
+ rtw8822b_phy_bf_init(rtwdev);
}
#define WLAN_SLOT_TIME 0x09
@@ -211,9 +267,8 @@ static int rtw8822b_mac_init(struct rtw_dev *rtwdev)
static void rtw8822b_set_channel_rfe_efem(struct rtw_dev *rtwdev, u8 channel)
{
struct rtw_hal *hal = &rtwdev->hal;
- bool is_channel_2g = (channel <= 14) ? true : false;
- if (is_channel_2g) {
+ if (IS_CH_2G_BAND(channel)) {
rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x705770);
rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x57);
rtw_write32s_mask(rtwdev, REG_RFECTL, BIT(4), 0);
@@ -241,9 +296,8 @@ static void rtw8822b_set_channel_rfe_efem(struct rtw_dev *rtwdev, u8 channel)
static void rtw8822b_set_channel_rfe_ifem(struct rtw_dev *rtwdev, u8 channel)
{
struct rtw_hal *hal = &rtwdev->hal;
- bool is_channel_2g = (channel <= 14) ? true : false;
- if (is_channel_2g) {
+ if (IS_CH_2G_BAND(channel)) {
/* signal source */
rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x745774);
rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x57);
@@ -255,7 +309,7 @@ static void rtw8822b_set_channel_rfe_ifem(struct rtw_dev *rtwdev, u8 channel)
rtw_write32s_mask(rtwdev, REG_RFEINV, BIT(11) | BIT(10) | 0x3f, 0x0);
- if (is_channel_2g) {
+ if (IS_CH_2G_BAND(channel)) {
if (hal->antenna_rx == BB_PATH_AB ||
hal->antenna_tx == BB_PATH_AB) {
/* 2TX or 2RX */
@@ -337,6 +391,7 @@ struct rtw8822b_rfe_info {
static const struct rtw8822b_rfe_info rtw8822b_rfe_info[] = {
[2] = I2GE5G_CCUT(efem),
+ [3] = IFEM_EXT_CCUT(ifem),
[5] = IFEM_EXT_CCUT(ifem),
};
@@ -350,7 +405,7 @@ static void rtw8822b_set_channel_cca(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u32 reg82c, reg830, reg838;
bool is_efem_cca = false, is_ifem_cca = false, is_rfe_type = false;
- if (channel <= 14) {
+ if (IS_CH_2G_BAND(channel)) {
cca_ccut = rfe_info->cca_ccut_2g;
if (hal->antenna_rx == BB_PATH_A ||
@@ -381,7 +436,7 @@ static void rtw8822b_set_channel_cca(struct rtw_dev *rtwdev, u8 channel, u8 bw,
is_efem_cca = true;
break;
case RTW_RFE_IFEM2G_EFEM5G:
- if (channel <= 14)
+ if (IS_CH_2G_BAND(channel))
is_ifem_cca = true;
else
is_efem_cca = true;
@@ -405,9 +460,7 @@ static void rtw8822b_set_channel_cca(struct rtw_dev *rtwdev, u8 channel, u8 bw,
if (is_efem_cca && !(hal->cut_version == RTW_CHIP_VER_CUT_B))
rtw_write32_mask(rtwdev, REG_L1WT, MASKDWORD, 0x9194b2b9);
- if (bw == RTW_CHANNEL_WIDTH_20 &&
- ((channel >= 52 && channel <= 64) ||
- (channel >= 100 && channel <= 144)))
+ if (bw == RTW_CHANNEL_WIDTH_20 && IS_CH_5G_BAND_MID(channel))
rtw_write32_mask(rtwdev, REG_CCA2ND, 0xf0, 0x4);
}
@@ -442,7 +495,7 @@ static void rtw8822b_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
rf_reg18 &= ~(RF18_BAND_MASK | RF18_CHANNEL_MASK | RF18_RFSI_MASK |
RF18_BW_MASK);
- rf_reg18 |= (channel <= 14 ? RF18_BAND_2G : RF18_BAND_5G);
+ rf_reg18 |= (IS_CH_2G_BAND(channel) ? RF18_BAND_2G : RF18_BAND_5G);
rf_reg18 |= (channel & RF18_CHANNEL_MASK);
if (channel > 144)
rf_reg18 |= RF18_RFSI_GT_CH144;
@@ -464,13 +517,13 @@ static void rtw8822b_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
break;
}
- if (channel <= 14)
+ if (IS_CH_2G_BAND(channel))
rf_reg_be = 0x0;
- else if (channel >= 36 && channel <= 64)
+ else if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel))
rf_reg_be = low_band[(channel - 36) >> 1];
- else if (channel >= 100 && channel <= 144)
+ else if (IS_CH_5G_BAND_3(channel))
rf_reg_be = middle_band[(channel - 100) >> 1];
- else if (channel >= 149 && channel <= 177)
+ else if (IS_CH_5G_BAND_4(channel))
rf_reg_be = high_band[(channel - 149) >> 1];
else
goto err;
@@ -539,7 +592,7 @@ static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 rfe_option = efuse->rfe_option;
u32 val32;
- if (channel <= 14) {
+ if (IS_CH_2G_BAND(channel)) {
rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x1);
rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x0);
rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x0);
@@ -556,22 +609,22 @@ static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
}
rtw_write32_mask(rtwdev, REG_RFEINV, 0x300, 0x2);
- } else if (channel > 35) {
+ } else if (IS_CH_5G_BAND(channel)) {
rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x1);
rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x1);
rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x0);
rtw_write32_mask(rtwdev, REG_RXCCAMSK, 0x0000FC00, 34);
- if (channel >= 36 && channel <= 64)
+ if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel))
rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x1);
- else if (channel >= 100 && channel <= 144)
+ else if (IS_CH_5G_BAND_3(channel))
rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x2);
- else if (channel >= 149)
+ else if (IS_CH_5G_BAND_4(channel))
rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x3);
- if (channel >= 36 && channel <= 48)
+ if (IS_CH_5G_BAND_1(channel))
rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x494);
- else if (channel >= 52 && channel <= 64)
+ else if (IS_CH_5G_BAND_2(channel))
rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x453);
else if (channel >= 100 && channel <= 116)
rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x452);
@@ -612,7 +665,7 @@ static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
- if (rfe_option == 2) {
+ if (rfe_option == 2 || rfe_option == 3) {
rtw_write32_mask(rtwdev, REG_L1PKWT, 0x0000f000, 0x6);
rtw_write32_mask(rtwdev, REG_ADC40, BIT(10), 0x1);
}
@@ -763,6 +816,7 @@ static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
s8 min_rx_power = -120;
u8 pwdb = GET_PHY_STAT_P0_PWDB(phy_status);
@@ -772,13 +826,19 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
min_rx_power);
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
}
static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 rxsc, bw;
s8 min_rx_power = -120;
+ s8 rx_evm;
+ u8 evm_dbm = 0;
+ u8 rssi;
+ int path;
if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
@@ -801,6 +861,34 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->signal_power = max3(pkt_stat->rx_power[RF_PATH_A],
pkt_stat->rx_power[RF_PATH_B],
min_rx_power);
+
+ dm_info->curr_rx_rate = pkt_stat->rate;
+
+ pkt_stat->rx_evm[RF_PATH_A] = GET_PHY_STAT_P1_RXEVM_A(phy_status);
+ pkt_stat->rx_evm[RF_PATH_B] = GET_PHY_STAT_P1_RXEVM_B(phy_status);
+
+ pkt_stat->rx_snr[RF_PATH_A] = GET_PHY_STAT_P1_RXSNR_A(phy_status);
+ pkt_stat->rx_snr[RF_PATH_B] = GET_PHY_STAT_P1_RXSNR_B(phy_status);
+
+ pkt_stat->cfo_tail[RF_PATH_A] = GET_PHY_STAT_P1_CFO_TAIL_A(phy_status);
+ pkt_stat->cfo_tail[RF_PATH_B] = GET_PHY_STAT_P1_CFO_TAIL_B(phy_status);
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[path], 1);
+ dm_info->rssi[path] = rssi;
+ dm_info->rx_snr[path] = pkt_stat->rx_snr[path] >> 1;
+ dm_info->cfo_tail[path] = (pkt_stat->cfo_tail[path] * 5) >> 1;
+
+ rx_evm = pkt_stat->rx_evm[path];
+
+ if (rx_evm < 0) {
+ if (rx_evm == S8_MIN)
+ evm_dbm = 0;
+ else
+ evm_dbm = ((u8)-rx_evm >> 1);
+ }
+ dm_info->rx_evm_dbm[path] = evm_dbm;
+ }
}
static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
@@ -836,7 +924,8 @@ static void rtw8822b_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
- pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc) &&
+ GET_RX_DESC_ENC_TYPE(rx_desc) != RX_DESC_ENC_NONE;
pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
@@ -946,6 +1035,7 @@ static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev)
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
u32 crc32_cnt;
+ u32 cca32_cnt;
cck_enable = rtw_read32(rtwdev, 0x808) & BIT(28);
cck_fa_cnt = rtw_read16(rtwdev, 0xa5c);
@@ -969,6 +1059,15 @@ static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev)
dm_info->vht_ok_cnt = crc32_cnt & 0xffff;
dm_info->vht_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ cca32_cnt = rtw_read32(rtwdev, 0xf08);
+ dm_info->ofdm_cca_cnt = ((cca32_cnt & 0xffff0000) >> 16);
+ dm_info->total_cca_cnt = dm_info->ofdm_cca_cnt;
+ if (cck_enable) {
+ cca32_cnt = rtw_read32(rtwdev, 0xfcc);
+ dm_info->cck_cca_cnt = cca32_cnt & 0xffff;
+ dm_info->total_cca_cnt += dm_info->cck_cca_cnt;
+ }
+
rtw_write32_set(rtwdev, 0x9a4, BIT(17));
rtw_write32_clr(rtwdev, 0x9a4, BIT(17));
rtw_write32_clr(rtwdev, 0xa2c, BIT(15));
@@ -1255,6 +1354,195 @@ static void rtw8822b_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
}
}
+static void rtw8822b_txagc_swing_offset(struct rtw_dev *rtwdev, u8 path,
+ u8 tx_pwr_idx_offset,
+ s8 *txagc_idx, u8 *swing_idx)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s8 delta_pwr_idx = dm_info->delta_power_index[path];
+ u8 swing_upper_bound = dm_info->default_ofdm_index + 10;
+ u8 swing_lower_bound = 0;
+ u8 max_tx_pwr_idx_offset = 0xf;
+ s8 agc_index = 0;
+ u8 swing_index = dm_info->default_ofdm_index;
+
+ tx_pwr_idx_offset = min_t(u8, tx_pwr_idx_offset, max_tx_pwr_idx_offset);
+
+ if (delta_pwr_idx >= 0) {
+ if (delta_pwr_idx <= tx_pwr_idx_offset) {
+ agc_index = delta_pwr_idx;
+ swing_index = dm_info->default_ofdm_index;
+ } else if (delta_pwr_idx > tx_pwr_idx_offset) {
+ agc_index = tx_pwr_idx_offset;
+ swing_index = dm_info->default_ofdm_index +
+ delta_pwr_idx - tx_pwr_idx_offset;
+ swing_index = min_t(u8, swing_index, swing_upper_bound);
+ }
+ } else {
+ if (dm_info->default_ofdm_index > abs(delta_pwr_idx))
+ swing_index =
+ dm_info->default_ofdm_index + delta_pwr_idx;
+ else
+ swing_index = swing_lower_bound;
+ swing_index = max_t(u8, swing_index, swing_lower_bound);
+
+ agc_index = 0;
+ }
+
+ if (swing_index >= RTW_TXSCALE_SIZE) {
+ rtw_warn(rtwdev, "swing index overflow\n");
+ swing_index = RTW_TXSCALE_SIZE - 1;
+ }
+ *txagc_idx = agc_index;
+ *swing_idx = swing_index;
+}
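
A worked example for the split above: with default_ofdm_index 24, delta_pwr_idx +6 and tx_pwr_idx_offset 4, the TXAGC absorbs 4 steps (txagc_idx = 4) and the swing table absorbs the remaining 2 (swing_idx = 26, which stays under the upper bound of default + 10 = 34). A negative delta of -3 would instead yield txagc_idx = 0 and swing_idx = 21.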
+
+static void rtw8822b_pwrtrack_set_pwr(struct rtw_dev *rtwdev, u8 path,
+ u8 pwr_idx_offset)
+{
+ s8 txagc_idx;
+ u8 swing_idx;
+ u32 reg1, reg2;
+
+ if (path == RF_PATH_A) {
+ reg1 = 0xc94;
+ reg2 = 0xc1c;
+ } else if (path == RF_PATH_B) {
+ reg1 = 0xe94;
+ reg2 = 0xe1c;
+ } else {
+ return;
+ }
+
+ rtw8822b_txagc_swing_offset(rtwdev, path, pwr_idx_offset,
+ &txagc_idx, &swing_idx);
+ rtw_write32_mask(rtwdev, reg1, GENMASK(29, 25), txagc_idx);
+ rtw_write32_mask(rtwdev, reg2, GENMASK(31, 21),
+ rtw8822b_txscale_tbl[swing_idx]);
+}
+
+static void rtw8822b_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 pwr_idx_offset, tx_pwr_idx;
+ u8 channel = rtwdev->hal.current_channel;
+ u8 band_width = rtwdev->hal.current_band_width;
+ u8 regd = rtwdev->regd.txpwr_regd;
+ u8 tx_rate = dm_info->tx_rate;
+ u8 max_pwr_idx = rtwdev->chip->max_power_index;
+
+ tx_pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, tx_rate,
+ band_width, channel, regd);
+
+ tx_pwr_idx = min_t(u8, tx_pwr_idx, max_pwr_idx);
+
+ pwr_idx_offset = max_pwr_idx - tx_pwr_idx;
+
+ rtw8822b_pwrtrack_set_pwr(rtwdev, path, pwr_idx_offset);
+}
+
+static void rtw8822b_phy_pwrtrack_path(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 power_idx_cur, power_idx_last;
+ u8 delta;
+
+ /* 8822B only has one thermal meter at PATH A */
+ delta = rtw_phy_pwrtrack_get_delta(rtwdev, RF_PATH_A);
+
+ power_idx_last = dm_info->delta_power_index[path];
+ power_idx_cur = rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table,
+ path, RF_PATH_A, delta);
+
+ /* if the delta of the power index is unchanged, just skip */
+ if (power_idx_cur == power_idx_last)
+ return;
+
+ dm_info->delta_power_index[path] = power_idx_cur;
+ rtw8822b_pwrtrack_set(rtwdev, path);
+}
+
+static void rtw8822b_phy_pwrtrack(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_swing_table swing_table;
+ u8 thermal_value, path;
+
+ rtw_phy_config_swing_table(rtwdev, &swing_table);
+
+ if (rtwdev->efuse.thermal_meter[RF_PATH_A] == 0xff)
+ return;
+
+ thermal_value = rtw_read_rf(rtwdev, RF_PATH_A, RF_T_METER, 0xfc00);
+
+ rtw_phy_pwrtrack_avg(rtwdev, thermal_value, RF_PATH_A);
+
+ if (dm_info->pwr_trk_init_trigger)
+ dm_info->pwr_trk_init_trigger = false;
+ else if (!rtw_phy_pwrtrack_thermal_changed(rtwdev, thermal_value,
+ RF_PATH_A))
+ goto iqk;
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++)
+ rtw8822b_phy_pwrtrack_path(rtwdev, &swing_table, path);
+
+iqk:
+ if (rtw_phy_pwrtrack_need_iqk(rtwdev))
+ rtw8822b_do_iqk(rtwdev);
+}
+
+static void rtw8822b_pwr_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ if (efuse->power_track_type != 0)
+ return;
+
+ if (!dm_info->pwr_trk_triggered) {
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER,
+ GENMASK(17, 16), 0x03);
+ dm_info->pwr_trk_triggered = true;
+ return;
+ }
+
+ rtw8822b_phy_pwrtrack(rtwdev);
+ dm_info->pwr_trk_triggered = false;
+}
+
+static void rtw8822b_bf_config_bfee_su(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (enable)
+ rtw_bf_enable_bfee_su(rtwdev, vif, bfee);
+ else
+ rtw_bf_remove_bfee_su(rtwdev, bfee);
+}
+
+static void rtw8822b_bf_config_bfee_mu(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (enable)
+ rtw_bf_enable_bfee_mu(rtwdev, vif, bfee);
+ else
+ rtw_bf_remove_bfee_mu(rtwdev, bfee);
+}
+
+static void rtw8822b_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (bfee->role == RTW_BFEE_SU)
+ rtw8822b_bf_config_bfee_su(rtwdev, vif, bfee, enable);
+ else if (bfee->role == RTW_BFEE_MU)
+ rtw8822b_bf_config_bfee_mu(rtwdev, vif, bfee, enable);
+ else
+ rtw_warn(rtwdev, "wrong bfee role\n");
+}
+
static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -1754,6 +2042,7 @@ static struct rtw_intf_phy_para_table phy_para_table_8822b = {
static const struct rtw_rfe_def rtw8822b_rfe_defs[] = {
[2] = RTW_DEF_RFE(8822b, 2, 2),
+ [3] = RTW_DEF_RFE(8822b, 3, 0),
[5] = RTW_DEF_RFE(8822b, 5, 5),
};
@@ -1801,6 +2090,10 @@ static struct rtw_chip_ops rtw8822b_ops = {
.cfg_ldo25 = rtw8822b_cfg_ldo25,
.false_alarm_statistics = rtw8822b_false_alarm_statistics,
.phy_calibration = rtw8822b_phy_calibration,
+ .pwr_track = rtw8822b_pwr_track,
+ .config_bfee = rtw8822b_bf_config_bfee,
+ .set_gid_table = rtw_bf_set_gid_table,
+ .cfg_csi_rate = rtw_bf_cfg_csi_rate,
.coex_set_init = rtw8822b_coex_cfg_init,
.coex_set_ant_switch = rtw8822b_coex_cfg_ant_switch,
@@ -1955,6 +2248,129 @@ static const struct coex_rf_para rf_para_rx_8822b[] = {
static_assert(ARRAY_SIZE(rf_para_tx_8822b) == ARRAY_SIZE(rf_para_rx_8822b));
+static const u8
+rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+};
+
+static const u8
+rtw8822b_pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23 },
+};
+
+static const u8
+rtw8822b_pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+};
+
+static const u8
+rtw8822b_pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
+};
+
+static const u8 rtw8822b_pwrtrk_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 5, 6, 6, 7, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12
+};
+
+static const u8 rtw8822b_pwrtrk_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
+ 5, 5, 6, 6, 6, 7, 7, 8, 8, 9,
+ 9, 10, 10, 11, 11, 12, 12, 12, 13, 13
+};
+
+static const u8 rtw8822b_pwrtrk_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 5, 6, 6, 7, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12
+};
+
+static const u8 rtw8822b_pwrtrk_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5,
+ 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 14, 15
+};
+
+static const u8 rtw8822b_pwrtrk_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 5, 6, 6, 7, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12
+};
+
+static const u8 rtw8822b_pwrtrk_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
+ 5, 5, 6, 6, 6, 7, 7, 8, 8, 9,
+ 9, 10, 10, 11, 11, 12, 12, 12, 13, 13
+};
+
+static const u8 rtw8822b_pwrtrk_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 5, 6, 6, 7, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12
+};
+
+static const u8 rtw8822b_pwrtrk_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5,
+ 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 14, 15
+};
+
+static const struct rtw_pwr_track_tbl rtw8822b_rtw_pwr_track_tbl = {
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8822b_pwrtrk_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8822b_pwrtrk_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8822b_pwrtrk_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8822b_pwrtrk_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8822b_pwrtrk_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8822b_pwrtrk_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8822b_pwrtrk_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8822b_pwrtrk_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8822b_pwrtrk_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gb_n = rtw8822b_pwrtrk_2gb_n,
+ .pwrtrk_2gb_p = rtw8822b_pwrtrk_2gb_p,
+ .pwrtrk_2ga_n = rtw8822b_pwrtrk_2ga_n,
+ .pwrtrk_2ga_p = rtw8822b_pwrtrk_2ga_p,
+ .pwrtrk_2g_cckb_n = rtw8822b_pwrtrk_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8822b_pwrtrk_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8822b_pwrtrk_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8822b_pwrtrk_2g_cck_a_p,
+};
+
struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
@@ -1977,6 +2393,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.dig_min = 0x1c,
.ht_supported = true,
.vht_supported = true,
+ .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK),
.sys_func_en = 0xDC,
.pwr_on_seq = card_enable_flow_8822b,
.pwr_off_seq = card_disable_flow_8822b,
@@ -1992,6 +2409,10 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.rf_tbl = {&rtw8822b_rf_a_tbl, &rtw8822b_rf_b_tbl},
.rfe_defs = rtw8822b_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8822b_rfe_defs),
+ .pwr_track_tbl = &rtw8822b_rtw_pwr_track_tbl,
+ .iqk_threshold = 8,
+ .bfer_su_max_num = 2,
+ .bfer_mu_max_num = 1,
.coex_para_ver = 0x19062706,
.bt_desired_ver = 0x6,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
index 0cb93d7d4cfd..6211f4b547b9 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
@@ -127,6 +127,18 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
+#define GET_PHY_STAT_P1_RXEVM_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXEVM_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_CFO_TAIL_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_CFO_TAIL_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_RXSNR_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8))
#define REG_HTSTFWT 0x800
#define REG_RXPSEL 0x808
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
index 465f58411cab..b9010b111a13 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
@@ -11643,104 +11643,155 @@ static const u32 rtw8822b_bb[] = {
RTW_DECL_TABLE_PHY_COND(rtw8822b_bb, rtw_phy_cfg_bb);
-static const u32 rtw8822b_bb_pg_type2[] = {
- 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
- 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042,
- 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234,
- 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840,
- 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032,
- 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840,
- 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032,
- 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840,
- 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032,
- 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224,
- 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436,
- 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
- 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638,
- 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042,
- 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234,
- 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840,
- 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032,
- 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840,
- 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032,
- 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840,
- 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032,
- 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224,
- 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436,
- 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628,
- 1, 0, 0, 0x00000c24, 0xffffffff, 0x40424446,
- 1, 0, 0, 0x00000c28, 0xffffffff, 0x32343638,
- 1, 0, 0, 0x00000c2c, 0xffffffff, 0x38404244,
- 1, 0, 0, 0x00000c30, 0xffffffff, 0x30323436,
- 1, 0, 1, 0x00000c34, 0xffffffff, 0x38404244,
- 1, 0, 1, 0x00000c38, 0xffffffff, 0x30323436,
- 1, 0, 0, 0x00000c3c, 0xffffffff, 0x38404244,
- 1, 0, 0, 0x00000c40, 0xffffffff, 0x30323436,
- 1, 0, 0, 0x00000c44, 0xffffffff, 0x42442628,
- 1, 0, 1, 0x00000c48, 0xffffffff, 0x34363840,
- 1, 0, 1, 0x00000c4c, 0xffffffff, 0x26283032,
- 1, 1, 0, 0x00000e24, 0xffffffff, 0x40424446,
- 1, 1, 0, 0x00000e28, 0xffffffff, 0x32343638,
- 1, 1, 0, 0x00000e2c, 0xffffffff, 0x38404244,
- 1, 1, 0, 0x00000e30, 0xffffffff, 0x30323436,
- 1, 1, 1, 0x00000e34, 0xffffffff, 0x38404244,
- 1, 1, 1, 0x00000e38, 0xffffffff, 0x30323436,
- 1, 1, 0, 0x00000e3c, 0xffffffff, 0x38404244,
- 1, 1, 0, 0x00000e40, 0xffffffff, 0x30323436,
- 1, 1, 0, 0x00000e44, 0xffffffff, 0x42442628,
- 1, 1, 1, 0x00000e48, 0xffffffff, 0x34363840,
- 1, 1, 1, 0x00000e4c, 0xffffffff, 0x26283032
+static const struct rtw_phy_pg_cfg_pair rtw8822b_bb_pg_type2[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x40424446, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x38404244, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x30323436, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x38404244, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x30323436, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x38404244, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x30323436, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x42442628, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x34363840, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x26283032, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x40424446, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x38404244, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x30323436, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x38404244, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x30323436, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x38404244, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x30323436, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x42442628, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x34363840, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x26283032, },
};
RTW_DECL_TABLE_BB_PG(rtw8822b_bb_pg_type2);
-static const u32 rtw8822b_bb_pg_type5[] = {
- 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
- 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042,
- 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234,
- 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840,
- 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032,
- 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840,
- 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032,
- 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840,
- 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032,
- 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224,
- 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436,
- 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
- 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638,
- 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042,
- 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234,
- 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840,
- 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032,
- 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840,
- 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032,
- 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840,
- 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032,
- 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224,
- 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436,
- 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628,
- 1, 0, 0, 0x00000c24, 0xffffffff, 0x34363840,
- 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032,
- 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343638,
- 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830,
- 1, 0, 1, 0x00000c34, 0xffffffff, 0x32343638,
- 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830,
- 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343638,
- 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830,
- 1, 0, 0, 0x00000c44, 0xffffffff, 0x36382022,
- 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303234,
- 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426,
- 1, 1, 0, 0x00000e24, 0xffffffff, 0x34363840,
- 1, 1, 0, 0x00000e28, 0xffffffff, 0x26283032,
- 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32343638,
- 1, 1, 0, 0x00000e30, 0xffffffff, 0x24262830,
- 1, 1, 1, 0x00000e34, 0xffffffff, 0x32343638,
- 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830,
- 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32343638,
- 1, 1, 0, 0x00000e40, 0xffffffff, 0x24262830,
- 1, 1, 0, 0x00000e44, 0xffffffff, 0x36382022,
- 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303234,
- 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426
+static const struct rtw_phy_pg_cfg_pair rtw8822b_bb_pg_type3[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34363840, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x32343638, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x36382022, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303234, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34363840, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x26283032, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x24262830, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x32343638, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x36382022, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303234, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8822b_bb_pg_type3);
+
+static const struct rtw_phy_pg_cfg_pair rtw8822b_bb_pg_type5[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34363840, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x32343638, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x36382022, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303234, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34363840, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x26283032, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x24262830, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x32343638, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x36382022, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303234, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426, },
};
RTW_DECL_TABLE_BB_PG(rtw8822b_bb_pg_type5);
@@ -20382,6 +20433,596 @@ static const u32 rtw8822b_rf_b[] = {
RTW_DECL_TABLE_RF_RADIO(rtw8822b_rf_b, B);
+static const struct rtw_txpwr_lmt_cfg_pair rtw8822b_txpwr_lmt_type0[] = {
+ { 0, 0, 0, 0, 1, 32, },
+ { 2, 0, 0, 0, 1, 28, },
+ { 1, 0, 0, 0, 1, 30, },
+ { 0, 0, 0, 0, 2, 32, },
+ { 2, 0, 0, 0, 2, 28, },
+ { 1, 0, 0, 0, 2, 30, },
+ { 0, 0, 0, 0, 3, 32, },
+ { 2, 0, 0, 0, 3, 28, },
+ { 1, 0, 0, 0, 3, 30, },
+ { 0, 0, 0, 0, 4, 32, },
+ { 2, 0, 0, 0, 4, 28, },
+ { 1, 0, 0, 0, 4, 30, },
+ { 0, 0, 0, 0, 5, 32, },
+ { 2, 0, 0, 0, 5, 28, },
+ { 1, 0, 0, 0, 5, 30, },
+ { 0, 0, 0, 0, 6, 32, },
+ { 2, 0, 0, 0, 6, 28, },
+ { 1, 0, 0, 0, 6, 30, },
+ { 0, 0, 0, 0, 7, 32, },
+ { 2, 0, 0, 0, 7, 28, },
+ { 1, 0, 0, 0, 7, 30, },
+ { 0, 0, 0, 0, 8, 32, },
+ { 2, 0, 0, 0, 8, 28, },
+ { 1, 0, 0, 0, 8, 30, },
+ { 0, 0, 0, 0, 9, 32, },
+ { 2, 0, 0, 0, 9, 28, },
+ { 1, 0, 0, 0, 9, 30, },
+ { 0, 0, 0, 0, 10, 32, },
+ { 2, 0, 0, 0, 10, 28, },
+ { 1, 0, 0, 0, 10, 30, },
+ { 0, 0, 0, 0, 11, 32, },
+ { 2, 0, 0, 0, 11, 28, },
+ { 1, 0, 0, 0, 11, 30, },
+ { 0, 0, 0, 0, 12, 26, },
+ { 2, 0, 0, 0, 12, 28, },
+ { 1, 0, 0, 0, 12, 30, },
+ { 0, 0, 0, 0, 13, 20, },
+ { 2, 0, 0, 0, 13, 28, },
+ { 1, 0, 0, 0, 13, 28, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 26, },
+ { 2, 0, 0, 1, 1, 30, },
+ { 1, 0, 0, 1, 1, 34, },
+ { 0, 0, 0, 1, 2, 30, },
+ { 2, 0, 0, 1, 2, 30, },
+ { 1, 0, 0, 1, 2, 34, },
+ { 0, 0, 0, 1, 3, 32, },
+ { 2, 0, 0, 1, 3, 30, },
+ { 1, 0, 0, 1, 3, 34, },
+ { 0, 0, 0, 1, 4, 34, },
+ { 2, 0, 0, 1, 4, 30, },
+ { 1, 0, 0, 1, 4, 34, },
+ { 0, 0, 0, 1, 5, 34, },
+ { 2, 0, 0, 1, 5, 30, },
+ { 1, 0, 0, 1, 5, 34, },
+ { 0, 0, 0, 1, 6, 34, },
+ { 2, 0, 0, 1, 6, 30, },
+ { 1, 0, 0, 1, 6, 34, },
+ { 0, 0, 0, 1, 7, 34, },
+ { 2, 0, 0, 1, 7, 30, },
+ { 1, 0, 0, 1, 7, 34, },
+ { 0, 0, 0, 1, 8, 34, },
+ { 2, 0, 0, 1, 8, 30, },
+ { 1, 0, 0, 1, 8, 34, },
+ { 0, 0, 0, 1, 9, 32, },
+ { 2, 0, 0, 1, 9, 30, },
+ { 1, 0, 0, 1, 9, 34, },
+ { 0, 0, 0, 1, 10, 30, },
+ { 2, 0, 0, 1, 10, 30, },
+ { 1, 0, 0, 1, 10, 34, },
+ { 0, 0, 0, 1, 11, 28, },
+ { 2, 0, 0, 1, 11, 30, },
+ { 1, 0, 0, 1, 11, 34, },
+ { 0, 0, 0, 1, 12, 22, },
+ { 2, 0, 0, 1, 12, 30, },
+ { 1, 0, 0, 1, 12, 34, },
+ { 0, 0, 0, 1, 13, 14, },
+ { 2, 0, 0, 1, 13, 30, },
+ { 1, 0, 0, 1, 13, 34, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 26, },
+ { 2, 0, 0, 2, 1, 30, },
+ { 1, 0, 0, 2, 1, 34, },
+ { 0, 0, 0, 2, 2, 30, },
+ { 2, 0, 0, 2, 2, 30, },
+ { 1, 0, 0, 2, 2, 34, },
+ { 0, 0, 0, 2, 3, 32, },
+ { 2, 0, 0, 2, 3, 30, },
+ { 1, 0, 0, 2, 3, 34, },
+ { 0, 0, 0, 2, 4, 34, },
+ { 2, 0, 0, 2, 4, 30, },
+ { 1, 0, 0, 2, 4, 34, },
+ { 0, 0, 0, 2, 5, 34, },
+ { 2, 0, 0, 2, 5, 30, },
+ { 1, 0, 0, 2, 5, 34, },
+ { 0, 0, 0, 2, 6, 34, },
+ { 2, 0, 0, 2, 6, 30, },
+ { 1, 0, 0, 2, 6, 34, },
+ { 0, 0, 0, 2, 7, 34, },
+ { 2, 0, 0, 2, 7, 30, },
+ { 1, 0, 0, 2, 7, 34, },
+ { 0, 0, 0, 2, 8, 34, },
+ { 2, 0, 0, 2, 8, 30, },
+ { 1, 0, 0, 2, 8, 34, },
+ { 0, 0, 0, 2, 9, 32, },
+ { 2, 0, 0, 2, 9, 30, },
+ { 1, 0, 0, 2, 9, 34, },
+ { 0, 0, 0, 2, 10, 30, },
+ { 2, 0, 0, 2, 10, 30, },
+ { 1, 0, 0, 2, 10, 34, },
+ { 0, 0, 0, 2, 11, 26, },
+ { 2, 0, 0, 2, 11, 30, },
+ { 1, 0, 0, 2, 11, 34, },
+ { 0, 0, 0, 2, 12, 20, },
+ { 2, 0, 0, 2, 12, 30, },
+ { 1, 0, 0, 2, 12, 34, },
+ { 0, 0, 0, 2, 13, 14, },
+ { 2, 0, 0, 2, 13, 30, },
+ { 1, 0, 0, 2, 13, 34, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 26, },
+ { 2, 0, 0, 3, 1, 18, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 28, },
+ { 2, 0, 0, 3, 2, 18, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 30, },
+ { 2, 0, 0, 3, 3, 18, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 30, },
+ { 2, 0, 0, 3, 4, 18, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 32, },
+ { 2, 0, 0, 3, 5, 18, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 32, },
+ { 2, 0, 0, 3, 6, 18, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 32, },
+ { 2, 0, 0, 3, 7, 18, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 30, },
+ { 2, 0, 0, 3, 8, 18, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 30, },
+ { 2, 0, 0, 3, 9, 18, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 28, },
+ { 2, 0, 0, 3, 10, 18, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 26, },
+ { 2, 0, 0, 3, 11, 18, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 20, },
+ { 2, 0, 0, 3, 12, 18, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 14, },
+ { 2, 0, 0, 3, 13, 18, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 26, },
+ { 2, 0, 1, 2, 3, 30, },
+ { 1, 0, 1, 2, 3, 34, },
+ { 0, 0, 1, 2, 4, 26, },
+ { 2, 0, 1, 2, 4, 30, },
+ { 1, 0, 1, 2, 4, 34, },
+ { 0, 0, 1, 2, 5, 30, },
+ { 2, 0, 1, 2, 5, 30, },
+ { 1, 0, 1, 2, 5, 34, },
+ { 0, 0, 1, 2, 6, 32, },
+ { 2, 0, 1, 2, 6, 30, },
+ { 1, 0, 1, 2, 6, 34, },
+ { 0, 0, 1, 2, 7, 30, },
+ { 2, 0, 1, 2, 7, 30, },
+ { 1, 0, 1, 2, 7, 34, },
+ { 0, 0, 1, 2, 8, 26, },
+ { 2, 0, 1, 2, 8, 30, },
+ { 1, 0, 1, 2, 8, 34, },
+ { 0, 0, 1, 2, 9, 26, },
+ { 2, 0, 1, 2, 9, 30, },
+ { 1, 0, 1, 2, 9, 34, },
+ { 0, 0, 1, 2, 10, 20, },
+ { 2, 0, 1, 2, 10, 30, },
+ { 1, 0, 1, 2, 10, 34, },
+ { 0, 0, 1, 2, 11, 14, },
+ { 2, 0, 1, 2, 11, 30, },
+ { 1, 0, 1, 2, 11, 34, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 63, },
+ { 1, 0, 1, 2, 12, 63, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 63, },
+ { 1, 0, 1, 2, 13, 63, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 24, },
+ { 2, 0, 1, 3, 3, 18, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 24, },
+ { 2, 0, 1, 3, 4, 18, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 26, },
+ { 2, 0, 1, 3, 5, 18, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 28, },
+ { 2, 0, 1, 3, 6, 18, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 26, },
+ { 2, 0, 1, 3, 7, 18, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 26, },
+ { 2, 0, 1, 3, 8, 18, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 26, },
+ { 2, 0, 1, 3, 9, 18, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 20, },
+ { 2, 0, 1, 3, 10, 18, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 14, },
+ { 2, 0, 1, 3, 11, 18, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 63, },
+ { 1, 0, 1, 3, 12, 63, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 63, },
+ { 1, 0, 1, 3, 13, 63, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 1, 0, 1, 36, 30, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 30, },
+ { 0, 1, 0, 1, 40, 32, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 30, },
+ { 0, 1, 0, 1, 44, 32, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 30, },
+ { 0, 1, 0, 1, 48, 32, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 30, },
+ { 0, 1, 0, 1, 52, 32, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 28, },
+ { 0, 1, 0, 1, 56, 32, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 28, },
+ { 0, 1, 0, 1, 60, 32, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 28, },
+ { 0, 1, 0, 1, 64, 28, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 28, },
+ { 0, 1, 0, 1, 100, 26, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 32, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 32, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 32, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 32, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 32, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 32, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 32, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 32, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 32, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 28, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 144, 28, },
+ { 2, 1, 0, 1, 144, 32, },
+ { 1, 1, 0, 1, 144, 63, },
+ { 0, 1, 0, 1, 149, 32, },
+ { 2, 1, 0, 1, 149, 63, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 32, },
+ { 2, 1, 0, 1, 153, 63, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 32, },
+ { 2, 1, 0, 1, 157, 63, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 32, },
+ { 2, 1, 0, 1, 161, 63, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 32, },
+ { 2, 1, 0, 1, 165, 63, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 30, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 28, },
+ { 0, 1, 0, 2, 40, 32, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 28, },
+ { 0, 1, 0, 2, 44, 32, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 28, },
+ { 0, 1, 0, 2, 48, 32, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 28, },
+ { 0, 1, 0, 2, 52, 32, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 28, },
+ { 0, 1, 0, 2, 56, 32, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 28, },
+ { 0, 1, 0, 2, 60, 32, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 28, },
+ { 0, 1, 0, 2, 64, 28, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 28, },
+ { 0, 1, 0, 2, 100, 26, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 32, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 32, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 32, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 32, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 32, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 32, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 32, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 32, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 32, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 26, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 144, 26, },
+ { 2, 1, 0, 2, 144, 63, },
+ { 1, 1, 0, 2, 144, 63, },
+ { 0, 1, 0, 2, 149, 32, },
+ { 2, 1, 0, 2, 149, 63, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 32, },
+ { 2, 1, 0, 2, 153, 63, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 32, },
+ { 2, 1, 0, 2, 157, 63, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 32, },
+ { 2, 1, 0, 2, 161, 63, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 32, },
+ { 2, 1, 0, 2, 165, 63, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 28, },
+ { 2, 1, 0, 3, 36, 20, },
+ { 1, 1, 0, 3, 36, 22, },
+ { 0, 1, 0, 3, 40, 30, },
+ { 2, 1, 0, 3, 40, 20, },
+ { 1, 1, 0, 3, 40, 22, },
+ { 0, 1, 0, 3, 44, 30, },
+ { 2, 1, 0, 3, 44, 20, },
+ { 1, 1, 0, 3, 44, 22, },
+ { 0, 1, 0, 3, 48, 30, },
+ { 2, 1, 0, 3, 48, 20, },
+ { 1, 1, 0, 3, 48, 22, },
+ { 0, 1, 0, 3, 52, 30, },
+ { 2, 1, 0, 3, 52, 20, },
+ { 1, 1, 0, 3, 52, 22, },
+ { 0, 1, 0, 3, 56, 30, },
+ { 2, 1, 0, 3, 56, 20, },
+ { 1, 1, 0, 3, 56, 22, },
+ { 0, 1, 0, 3, 60, 30, },
+ { 2, 1, 0, 3, 60, 20, },
+ { 1, 1, 0, 3, 60, 22, },
+ { 0, 1, 0, 3, 64, 28, },
+ { 2, 1, 0, 3, 64, 20, },
+ { 1, 1, 0, 3, 64, 22, },
+ { 0, 1, 0, 3, 100, 26, },
+ { 2, 1, 0, 3, 100, 20, },
+ { 1, 1, 0, 3, 100, 30, },
+ { 0, 1, 0, 3, 104, 30, },
+ { 2, 1, 0, 3, 104, 20, },
+ { 1, 1, 0, 3, 104, 30, },
+ { 0, 1, 0, 3, 108, 32, },
+ { 2, 1, 0, 3, 108, 20, },
+ { 1, 1, 0, 3, 108, 30, },
+ { 0, 1, 0, 3, 112, 32, },
+ { 2, 1, 0, 3, 112, 20, },
+ { 1, 1, 0, 3, 112, 30, },
+ { 0, 1, 0, 3, 116, 32, },
+ { 2, 1, 0, 3, 116, 20, },
+ { 1, 1, 0, 3, 116, 30, },
+ { 0, 1, 0, 3, 120, 32, },
+ { 2, 1, 0, 3, 120, 20, },
+ { 1, 1, 0, 3, 120, 30, },
+ { 0, 1, 0, 3, 124, 32, },
+ { 2, 1, 0, 3, 124, 20, },
+ { 1, 1, 0, 3, 124, 30, },
+ { 0, 1, 0, 3, 128, 32, },
+ { 2, 1, 0, 3, 128, 20, },
+ { 1, 1, 0, 3, 128, 30, },
+ { 0, 1, 0, 3, 132, 32, },
+ { 2, 1, 0, 3, 132, 20, },
+ { 1, 1, 0, 3, 132, 30, },
+ { 0, 1, 0, 3, 136, 30, },
+ { 2, 1, 0, 3, 136, 20, },
+ { 1, 1, 0, 3, 136, 30, },
+ { 0, 1, 0, 3, 140, 26, },
+ { 2, 1, 0, 3, 140, 20, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 144, 26, },
+ { 2, 1, 0, 3, 144, 63, },
+ { 1, 1, 0, 3, 144, 63, },
+ { 0, 1, 0, 3, 149, 32, },
+ { 2, 1, 0, 3, 149, 63, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 32, },
+ { 2, 1, 0, 3, 153, 63, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 32, },
+ { 2, 1, 0, 3, 157, 63, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 32, },
+ { 2, 1, 0, 3, 161, 63, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 32, },
+ { 2, 1, 0, 3, 165, 63, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 1, 2, 38, 22, },
+ { 2, 1, 1, 2, 38, 30, },
+ { 1, 1, 1, 2, 38, 30, },
+ { 0, 1, 1, 2, 46, 30, },
+ { 2, 1, 1, 2, 46, 30, },
+ { 1, 1, 1, 2, 46, 30, },
+ { 0, 1, 1, 2, 54, 30, },
+ { 2, 1, 1, 2, 54, 30, },
+ { 1, 1, 1, 2, 54, 30, },
+ { 0, 1, 1, 2, 62, 24, },
+ { 2, 1, 1, 2, 62, 30, },
+ { 1, 1, 1, 2, 62, 30, },
+ { 0, 1, 1, 2, 102, 24, },
+ { 2, 1, 1, 2, 102, 30, },
+ { 1, 1, 1, 2, 102, 30, },
+ { 0, 1, 1, 2, 110, 30, },
+ { 2, 1, 1, 2, 110, 30, },
+ { 1, 1, 1, 2, 110, 30, },
+ { 0, 1, 1, 2, 118, 30, },
+ { 2, 1, 1, 2, 118, 30, },
+ { 1, 1, 1, 2, 118, 30, },
+ { 0, 1, 1, 2, 126, 30, },
+ { 2, 1, 1, 2, 126, 30, },
+ { 1, 1, 1, 2, 126, 30, },
+ { 0, 1, 1, 2, 134, 30, },
+ { 2, 1, 1, 2, 134, 30, },
+ { 1, 1, 1, 2, 134, 30, },
+ { 0, 1, 1, 2, 142, 30, },
+ { 2, 1, 1, 2, 142, 63, },
+ { 1, 1, 1, 2, 142, 63, },
+ { 0, 1, 1, 2, 151, 30, },
+ { 2, 1, 1, 2, 151, 63, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 30, },
+ { 2, 1, 1, 2, 159, 63, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 20, },
+ { 2, 1, 1, 3, 38, 20, },
+ { 1, 1, 1, 3, 38, 22, },
+ { 0, 1, 1, 3, 46, 30, },
+ { 2, 1, 1, 3, 46, 20, },
+ { 1, 1, 1, 3, 46, 22, },
+ { 0, 1, 1, 3, 54, 30, },
+ { 2, 1, 1, 3, 54, 20, },
+ { 1, 1, 1, 3, 54, 22, },
+ { 0, 1, 1, 3, 62, 22, },
+ { 2, 1, 1, 3, 62, 20, },
+ { 1, 1, 1, 3, 62, 22, },
+ { 0, 1, 1, 3, 102, 22, },
+ { 2, 1, 1, 3, 102, 20, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 30, },
+ { 2, 1, 1, 3, 110, 20, },
+ { 1, 1, 1, 3, 110, 30, },
+ { 0, 1, 1, 3, 118, 30, },
+ { 2, 1, 1, 3, 118, 20, },
+ { 1, 1, 1, 3, 118, 30, },
+ { 0, 1, 1, 3, 126, 30, },
+ { 2, 1, 1, 3, 126, 20, },
+ { 1, 1, 1, 3, 126, 30, },
+ { 0, 1, 1, 3, 134, 30, },
+ { 2, 1, 1, 3, 134, 20, },
+ { 1, 1, 1, 3, 134, 30, },
+ { 0, 1, 1, 3, 142, 30, },
+ { 2, 1, 1, 3, 142, 63, },
+ { 1, 1, 1, 3, 142, 63, },
+ { 0, 1, 1, 3, 151, 30, },
+ { 2, 1, 1, 3, 151, 63, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 30, },
+ { 2, 1, 1, 3, 159, 63, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 2, 4, 42, 20, },
+ { 2, 1, 2, 4, 42, 30, },
+ { 1, 1, 2, 4, 42, 28, },
+ { 0, 1, 2, 4, 58, 20, },
+ { 2, 1, 2, 4, 58, 30, },
+ { 1, 1, 2, 4, 58, 28, },
+ { 0, 1, 2, 4, 106, 20, },
+ { 2, 1, 2, 4, 106, 30, },
+ { 1, 1, 2, 4, 106, 30, },
+ { 0, 1, 2, 4, 122, 30, },
+ { 2, 1, 2, 4, 122, 30, },
+ { 1, 1, 2, 4, 122, 30, },
+ { 0, 1, 2, 4, 138, 30, },
+ { 2, 1, 2, 4, 138, 63, },
+ { 1, 1, 2, 4, 138, 63, },
+ { 0, 1, 2, 4, 155, 30, },
+ { 2, 1, 2, 4, 155, 63, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 18, },
+ { 2, 1, 2, 5, 42, 20, },
+ { 1, 1, 2, 5, 42, 22, },
+ { 0, 1, 2, 5, 58, 18, },
+ { 2, 1, 2, 5, 58, 20, },
+ { 1, 1, 2, 5, 58, 22, },
+ { 0, 1, 2, 5, 106, 20, },
+ { 2, 1, 2, 5, 106, 20, },
+ { 1, 1, 2, 5, 106, 30, },
+ { 0, 1, 2, 5, 122, 30, },
+ { 2, 1, 2, 5, 122, 20, },
+ { 1, 1, 2, 5, 122, 30, },
+ { 0, 1, 2, 5, 138, 30, },
+ { 2, 1, 2, 5, 138, 63, },
+ { 1, 1, 2, 5, 138, 63, },
+ { 0, 1, 2, 5, 155, 30, },
+ { 2, 1, 2, 5, 155, 63, },
+ { 1, 1, 2, 5, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8822b_txpwr_lmt_type0);
+
static const struct rtw_txpwr_lmt_cfg_pair rtw8822b_txpwr_lmt_type2[] = {
{ 0, 0, 0, 0, 1, 32, },
{ 2, 0, 0, 0, 1, 28, },
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h
index d4c268889368..4140e1ccb7b1 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h
@@ -9,9 +9,11 @@ extern const struct rtw_table rtw8822b_mac_tbl;
extern const struct rtw_table rtw8822b_agc_tbl;
extern const struct rtw_table rtw8822b_bb_tbl;
extern const struct rtw_table rtw8822b_bb_pg_type2_tbl;
+extern const struct rtw_table rtw8822b_bb_pg_type3_tbl;
extern const struct rtw_table rtw8822b_bb_pg_type5_tbl;
extern const struct rtw_table rtw8822b_rf_a_tbl;
extern const struct rtw_table rtw8822b_rf_b_tbl;
+extern const struct rtw_table rtw8822b_txpwr_lmt_type0_tbl;
extern const struct rtw_table rtw8822b_txpwr_lmt_type2_tbl;
extern const struct rtw_table rtw8822b_txpwr_lmt_type5_tbl;
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index c2f6cd76a658..174029836833 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -14,6 +14,7 @@
#include "reg.h"
#include "debug.h"
#include "util.h"
+#include "bf.h"
static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
@@ -40,6 +41,11 @@ static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
efuse->country_code[1] = map->country_code[1];
efuse->bt_setting = map->rf_bt_setting;
efuse->regd = map->rf_board_option & 0x7;
+ efuse->thermal_meter[RF_PATH_A] = map->path_a_thermal;
+ efuse->thermal_meter[RF_PATH_B] = map->path_b_thermal;
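+ /* calibration reference: average of both paths' thermal readings */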
+ efuse->thermal_meter_k =
+ (map->path_a_thermal + map->path_b_thermal) >> 1;
+ efuse->power_track_type = (map->tx_pwr_calibrate_rate >> 4) & 0xf;
for (i = 0; i < 4; i++)
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
@@ -1000,6 +1006,21 @@ static void rtw8822c_rf_init(struct rtw_dev *rtwdev)
rtw8822c_rf_x2_check(rtwdev);
}
+static void rtw8822c_pwrtrack_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 path;
+
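+ /* start from a clean tracking state; 0xff flags that no thermal
+  * average has been taken yet
+  */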
+ for (path = RF_PATH_A; path < RTW_RF_PATH_MAX; path++) {
+ dm_info->delta_power_index[path] = 0;
+ ewma_thermal_init(&dm_info->avg_thermal[path]);
+ dm_info->thermal_avg[path] = 0xff;
+ }
+
+ dm_info->pwr_trk_triggered = false;
+ dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
+}
+
static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
@@ -1047,6 +1068,9 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
dm_info->cck_gi_l_bnd = ((cck_gi_l_bnd_msb << 4) | (cck_gi_l_bnd_lsb));
rtw8822c_rf_init(rtwdev);
+ rtw8822c_pwrtrack_init(rtwdev);
+
+ rtw_bf_phy_init(rtwdev);
}
#define WLAN_TXQ_RPT_EN 0x1F
@@ -1088,8 +1112,8 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
#define WLAN_AMPDU_MAX_TIME 0x70
#define WLAN_RTS_LEN_TH 0xFF
#define WLAN_RTS_TX_TIME_TH 0x08
-#define WLAN_MAX_AGG_PKT_LIMIT 0x20
-#define WLAN_RTS_MAX_AGG_PKT_LIMIT 0x20
+#define WLAN_MAX_AGG_PKT_LIMIT 0x3f
+#define WLAN_RTS_MAX_AGG_PKT_LIMIT 0x3f
#define WLAN_PRE_TXCNT_TIME_TH 0x1E0
#define FAST_EDCA_VO_TH 0x06
#define FAST_EDCA_VI_TH 0x06
@@ -1112,6 +1136,7 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
#define WLAN_RTS_RATE_FB_RATE4_H 0x400003E0
#define WLAN_RTS_RATE_FB_RATE5 0x0600F015
#define WLAN_RTS_RATE_FB_RATE5_H 0x000000E0
+#define WLAN_MULTI_ADDR 0xFFFFFFFF
#define WLAN_TX_FUNC_CFG1 0x30
#define WLAN_TX_FUNC_CFG2 0x30
@@ -1221,6 +1246,8 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev)
rtw_write8(rtwdev, REG_BCN_MAX_ERR, WLAN_BCN_MAX_ERR);
/* WMAC configuration */
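+ /* accept all multicast frames: fill both MAR words with all ones */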
+ rtw_write32(rtwdev, REG_MAR, WLAN_MULTI_ADDR);
+ rtw_write32(rtwdev, REG_MAR + 4, WLAN_MULTI_ADDR);
rtw_write8(rtwdev, REG_BBPSF_CTRL + 2, WLAN_RESP_TXRATE);
rtw_write8(rtwdev, REG_ACKTO, WLAN_ACK_TO);
rtw_write8(rtwdev, REG_ACKTO_CCK, WLAN_ACK_TO_CCK);
@@ -1284,11 +1311,11 @@ static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
rf_reg18 &= ~(RF18_BAND_MASK | RF18_CHANNEL_MASK | RF18_RFSI_MASK |
RF18_BW_MASK);
- rf_reg18 |= (channel <= 14 ? RF18_BAND_2G : RF18_BAND_5G);
+ rf_reg18 |= (IS_CH_2G_BAND(channel) ? RF18_BAND_2G : RF18_BAND_5G);
rf_reg18 |= (channel & RF18_CHANNEL_MASK);
- if (channel > 144)
+ if (IS_CH_5G_BAND_4(channel))
rf_reg18 |= RF18_RFSI_GT_CH140;
- else if (channel >= 80)
+ else if (IS_CH_5G_BAND_3(channel))
rf_reg18 |= RF18_RFSI_GE_CH80;
switch (bw) {
@@ -1338,7 +1365,7 @@ static void rtw8822c_toggle_igi(struct rtw_dev *rtwdev)
static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 primary_ch_idx)
{
- if (channel <= 14) {
+ if (IS_CH_2G_BAND(channel)) {
rtw_write32_clr(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT);
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
rtw_write32_set(rtwdev, REG_TXF4, BIT(20));
@@ -1403,7 +1430,7 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3);
else
rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x1);
- } else if (channel > 35) {
+ } else if (IS_CH_5G_BAND(channel)) {
rtw_write32_set(rtwdev, REG_CCKTXONLY, BIT_BB_CCK_CHECK_EN);
rtw_write32_set(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN);
rtw_write32_set(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT);
@@ -1411,17 +1438,17 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x0);
rtw_write32_mask(rtwdev, REG_CCAMSK, 0x3F000000, 0x22);
rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3);
- if (channel >= 36 && channel <= 64) {
+ if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) {
rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_OFDM,
0x1);
rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_OFDM,
0x1);
- } else if (channel >= 100 && channel <= 144) {
+ } else if (IS_CH_5G_BAND_3(channel)) {
rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_OFDM,
0x2);
rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_OFDM,
0x2);
- } else if (channel >= 149) {
+ } else if (IS_CH_5G_BAND_4(channel)) {
rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_OFDM,
0x3);
rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_OFDM,
@@ -1616,6 +1643,8 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
u8 gain_a, gain_b;
s8 rx_power[RTW_RF_PATH_MAX];
s8 min_rx_power = -120;
+ u8 rssi;
+ int path;
rx_power[RF_PATH_A] = GET_PHY_STAT_P0_PWDB_A(phy_status);
rx_power[RF_PATH_B] = GET_PHY_STAT_P0_PWDB_B(phy_status);
@@ -1638,6 +1667,11 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->rx_power[RF_PATH_A] = rx_power[RF_PATH_A];
pkt_stat->rx_power[RF_PATH_B] = rx_power[RF_PATH_B];
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[path], 1);
+ dm_info->rssi[path] = rssi;
+ }
+
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
@@ -1647,8 +1681,13 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 rxsc, bw;
s8 min_rx_power = -120;
+ s8 rx_evm;
+ u8 evm_dbm = 0;
+ u8 rssi;
+ int path;
if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
@@ -1669,6 +1708,34 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->signal_power = max3(pkt_stat->rx_power[RF_PATH_A],
pkt_stat->rx_power[RF_PATH_B],
min_rx_power);
+
+ dm_info->curr_rx_rate = pkt_stat->rate;
+
+ pkt_stat->rx_evm[RF_PATH_A] = GET_PHY_STAT_P1_RXEVM_A(phy_status);
+ pkt_stat->rx_evm[RF_PATH_B] = GET_PHY_STAT_P1_RXEVM_B(phy_status);
+
+ pkt_stat->rx_snr[RF_PATH_A] = GET_PHY_STAT_P1_RXSNR_A(phy_status);
+ pkt_stat->rx_snr[RF_PATH_B] = GET_PHY_STAT_P1_RXSNR_B(phy_status);
+
+ pkt_stat->cfo_tail[RF_PATH_A] = GET_PHY_STAT_P1_CFO_TAIL_A(phy_status);
+ pkt_stat->cfo_tail[RF_PATH_B] = GET_PHY_STAT_P1_CFO_TAIL_B(phy_status);
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[path], 1);
+ dm_info->rssi[path] = rssi;
+ dm_info->rx_snr[path] = pkt_stat->rx_snr[path] >> 1;
+ dm_info->cfo_tail[path] = (pkt_stat->cfo_tail[path] * 5) >> 1;
+
+ rx_evm = pkt_stat->rx_evm[path];
+
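+ /* EVM is reported as a negative value; halve its magnitude to get
+  * dB, with S8_MIN marking an invalid sample
+  */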
+ if (rx_evm < 0) {
+ if (rx_evm == S8_MIN)
+ evm_dbm = 0;
+ else
+ evm_dbm = ((u8)-rx_evm >> 1);
+ }
+ dm_info->rx_evm_dbm[path] = evm_dbm;
+ }
}
static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
@@ -1704,7 +1771,8 @@ static void rtw8822c_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
- pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc) &&
+ GET_RX_DESC_ENC_TYPE(rx_desc) != RX_DESC_ENC_NONE;
pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
@@ -1822,6 +1890,7 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
u32 cck_enable;
u32 cck_fa_cnt;
u32 crc32_cnt;
+ u32 cca32_cnt;
u32 ofdm_fa_cnt;
u32 ofdm_fa_cnt1, ofdm_fa_cnt2, ofdm_fa_cnt3, ofdm_fa_cnt4, ofdm_fa_cnt5;
u16 parity_fail, rate_illegal, crc8_fail, mcs_fail, sb_search_fail,
@@ -1866,6 +1935,13 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
dm_info->vht_ok_cnt = crc32_cnt & 0xffff;
dm_info->vht_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ cca32_cnt = rtw_read32(rtwdev, 0x2c08);
+ dm_info->ofdm_cca_cnt = ((cca32_cnt & 0xffff0000) >> 16);
+ dm_info->cck_cca_cnt = cca32_cnt & 0xffff;
+ dm_info->total_cca_cnt = dm_info->ofdm_cca_cnt;
+ if (cck_enable)
+ dm_info->total_cca_cnt += dm_info->cck_cca_cnt;
+
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 0);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 2);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_OFDM_FA_RST, 0);
@@ -2053,6 +2129,57 @@ static void rtw8822c_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
}
}
+static void rtw8822c_bf_enable_bfee_su(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee)
+{
+ u8 csi_rsc = 0;
+ u32 tmp6dc;
+
+ rtw_bf_enable_bfee_su(rtwdev, vif, bfee);
+
+ tmp6dc = rtw_read32(rtwdev, REG_BBPSF_CTRL) |
+ BIT_WMAC_USE_NDPARATE |
+ (csi_rsc << 13);
+ if (vif->net_type == RTW_NET_AP_MODE)
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, tmp6dc | BIT(12));
+ else
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, tmp6dc & ~BIT(12));
+
+ rtw_write32(rtwdev, REG_CSI_RRSR, 0x550);
+}
+
+static void rtw8822c_bf_config_bfee_su(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (enable)
+ rtw8822c_bf_enable_bfee_su(rtwdev, vif, bfee);
+ else
+ rtw_bf_remove_bfee_su(rtwdev, bfee);
+}
+
+static void rtw8822c_bf_config_bfee_mu(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (enable)
+ rtw_bf_enable_bfee_mu(rtwdev, vif, bfee);
+ else
+ rtw_bf_remove_bfee_mu(rtwdev, bfee);
+}
+
+static void rtw8822c_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (bfee->role == RTW_BFEE_SU)
+ rtw8822c_bf_config_bfee_su(rtwdev, vif, bfee, enable);
+ else if (bfee->role == RTW_BFEE_MU)
+ rtw8822c_bf_config_bfee_mu(rtwdev, vif, bfee, enable);
+ else
+ rtw_warn(rtwdev, "wrong bfee role\n");
+}
+
struct dpk_cfg_pair {
u32 addr;
u32 bitmask;
@@ -2603,9 +2730,9 @@ static bool rtw8822c_dpk_coef_iq_check(struct rtw_dev *rtwdev,
{
if (coef_i == 0x1000 || coef_i == 0x0fff ||
coef_q == 0x1000 || coef_q == 0x0fff)
- return 1;
- else
- return 0;
+ return true;
+
+ return false;
}
static u32 rtw8822c_dpk_coef_transfer(struct rtw_dev *rtwdev)
@@ -2843,7 +2970,7 @@ static void rtw8822c_dpk_cal_gs(struct rtw_dev *rtwdev, u8 path)
dpk_info->dpk_gs[path] = tmp_gs;
}
-void rtw8822c_dpk_cal_coef1(struct rtw_dev *rtwdev)
+static void rtw8822c_dpk_cal_coef1(struct rtw_dev *rtwdev)
{
struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
u32 offset[DPK_RF_PATH_NUM] = {0, 0x58};
@@ -3084,7 +3211,7 @@ static void rtw8822c_phy_calibration(struct rtw_dev *rtwdev)
rtw8822c_do_dpk(rtwdev);
}
-void rtw8822c_dpk_track(struct rtw_dev *rtwdev)
+static void rtw8822c_dpk_track(struct rtw_dev *rtwdev)
{
struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
u8 path;
@@ -3168,8 +3295,8 @@ rtw8822c_phy_cck_pd_set_reg(struct rtw_dev *rtwdev,
static void rtw8822c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- s8 pd_lvl[4] = {2, 4, 6, 8};
- s8 cs_lvl[4] = {2, 2, 2, 4};
+ s8 pd_lvl[CCK_PD_LV_MAX] = {0, 2, 4, 6, 8};
+ s8 cs_lvl[CCK_PD_LV_MAX] = {0, 2, 2, 2, 4};
u8 cur_lvl;
u8 nrx, bw;
@@ -3191,6 +3318,87 @@ static void rtw8822c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
dm_info->cck_pd_lv[bw][nrx] = new_lvl;
}
+#define PWR_TRACK_MASK 0x7f
+static void rtw8822c_pwrtrack_set(struct rtw_dev *rtwdev, u8 rf_path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
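+ /* the delta power index is applied through per-path BB registers:
+  * 0x18a0 for path A, 0x41a0 for path B
+  */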
+ switch (rf_path) {
+ case RF_PATH_A:
+ rtw_write32_mask(rtwdev, 0x18a0, PWR_TRACK_MASK,
+ dm_info->delta_power_index[rf_path]);
+ break;
+ case RF_PATH_B:
+ rtw_write32_mask(rtwdev, 0x41a0, PWR_TRACK_MASK,
+ dm_info->delta_power_index[rf_path]);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 thermal_value, delta;
+
+ if (rtwdev->efuse.thermal_meter[path] == 0xff)
+ return;
+
+ thermal_value = rtw_read_rf(rtwdev, path, RF_T_METER, 0x7e);
+
+ rtw_phy_pwrtrack_avg(rtwdev, thermal_value, path);
+
+ delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
+
+ dm_info->delta_power_index[path] =
+ rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table, path, path,
+ delta);
+
+ rtw8822c_pwrtrack_set(rtwdev, path);
+}
+
+static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_swing_table swing_table;
+ u8 i;
+
+ rtw_phy_config_swing_table(rtwdev, &swing_table);
+
+ for (i = 0; i < rtwdev->hal.rf_path_num; i++)
+ rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
+
+ if (rtw_phy_pwrtrack_need_iqk(rtwdev))
+ rtw8822c_do_iqk(rtwdev);
+}
+
+static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ if (efuse->power_track_type != 0)
+ return;
+
+ if (!dm_info->pwr_trk_triggered) {
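+ /* pulse BIT(19) low and back high on each path to restart the
+  * thermal meter; the fresh reading is picked up on the next call
+  */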
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, BIT(19), 0x01);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, BIT(19), 0x00);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, BIT(19), 0x01);
+
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_T_METER, BIT(19), 0x01);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_T_METER, BIT(19), 0x00);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_T_METER, BIT(19), 0x01);
+
+ dm_info->pwr_trk_triggered = true;
+ return;
+ }
+
+ __rtw8822c_pwr_track(rtwdev);
+ dm_info->pwr_trk_triggered = false;
+}
+
static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -3571,6 +3779,10 @@ static struct rtw_chip_ops rtw8822c_ops = {
.dpk_track = rtw8822c_dpk_track,
.phy_calibration = rtw8822c_phy_calibration,
.cck_pd_set = rtw8822c_phy_cck_pd_set,
+ .pwr_track = rtw8822c_pwr_track,
+ .config_bfee = rtw8822c_bf_config_bfee,
+ .set_gid_table = rtw_bf_set_gid_table,
+ .cfg_csi_rate = rtw_bf_cfg_csi_rate,
.coex_set_init = rtw8822c_coex_cfg_init,
.coex_set_ant_switch = NULL,
@@ -3725,6 +3937,129 @@ static const struct coex_rf_para rf_para_rx_8822c[] = {
static_assert(ARRAY_SIZE(rf_para_tx_8822c) == ARRAY_SIZE(rf_para_rx_8822c));
+static const u8
+rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 32 },
+ { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 32 },
+ { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 32 },
+};
+
+static const u8
+rtw8822c_pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 22, 23, 24, 25, 26, 27 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 22, 23, 24, 25, 26, 27 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 22, 23, 24, 25, 26, 27 },
+};
+
+static const u8
+rtw8822c_pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 4, 5, 6, 7, 8, 9, 10,
+ 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 33 },
+ { 0, 1, 2, 4, 5, 6, 7, 8, 9, 10,
+ 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 33 },
+ { 0, 1, 2, 4, 5, 6, 7, 8, 9, 10,
+ 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 33 },
+};
+
+static const u8
+rtw8822c_pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 },
+};
+
+static const u8 rtw8822c_pwrtrk_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 4, 5, 6, 7, 8,
+ 9, 9, 10, 11, 12, 13, 14, 15, 15, 16,
+ 17, 18, 19, 20, 20, 21, 22, 23, 24, 25
+};
+
+static const u8 rtw8822c_pwrtrk_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28
+};
+
+static const u8 rtw8822c_pwrtrk_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 4, 5, 6, 6,
+ 7, 8, 8, 9, 9, 10, 11, 11, 12, 13,
+ 13, 14, 15, 15, 16, 17, 17, 18, 19, 19
+};
+
+static const u8 rtw8822c_pwrtrk_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 25, 26, 27
+};
+
+static const u8 rtw8822c_pwrtrk_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 5, 6, 7, 8,
+ 9, 10, 11, 11, 12, 13, 14, 15, 16, 17,
+ 17, 18, 19, 20, 21, 22, 23, 23, 24, 25
+};
+
+static const u8 rtw8822c_pwrtrk_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29
+};
+
+static const u8 rtw8822c_pwrtrk_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 3, 4, 5, 6, 6, 7,
+ 8, 9, 9, 10, 11, 12, 12, 13, 14, 15,
+ 15, 16, 17, 18, 18, 19, 20, 21, 21, 22
+};
+
+static const u8 rtw8822c_pwrtrk_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 5, 6, 7, 8,
+ 9, 10, 11, 11, 12, 13, 14, 15, 16, 17,
+ 18, 18, 19, 20, 21, 22, 23, 24, 24, 25
+};
+
+static const struct rtw_pwr_track_tbl rtw8822c_rtw_pwr_track_tbl = {
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8822c_pwrtrk_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8822c_pwrtrk_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8822c_pwrtrk_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8822c_pwrtrk_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8822c_pwrtrk_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8822c_pwrtrk_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8822c_pwrtrk_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8822c_pwrtrk_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8822c_pwrtrk_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gb_n = rtw8822c_pwrtrk_2gb_n,
+ .pwrtrk_2gb_p = rtw8822c_pwrtrk_2gb_p,
+ .pwrtrk_2ga_n = rtw8822c_pwrtrk_2ga_n,
+ .pwrtrk_2ga_p = rtw8822c_pwrtrk_2ga_p,
+ .pwrtrk_2g_cckb_n = rtw8822c_pwrtrk_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8822c_pwrtrk_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8822c_pwrtrk_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8822c_pwrtrk_2g_cck_a_p,
+};
+
struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
@@ -3747,6 +4082,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.dig_min = 0x20,
.ht_supported = true,
.vht_supported = true,
+ .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK) | BIT(LPS_DEEP_MODE_PG),
.sys_func_en = 0xD8,
.pwr_on_seq = card_enable_flow_8822c,
.pwr_off_seq = card_disable_flow_8822c,
@@ -3765,6 +4101,10 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.rfe_defs_size = ARRAY_SIZE(rtw8822c_rfe_defs),
.en_dis_dpd = true,
.dpd_ratemask = DIS_DPD_RATEALL,
+ .pwr_track_tbl = &rtw8822c_rtw_pwr_track_tbl,
+ .iqk_threshold = 8,
+ .bfer_su_max_num = 2,
+ .bfer_mu_max_num = 1,
.coex_para_ver = 0x19062706,
.bt_desired_ver = 0x6,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index 438db74d8e7a..abd9f300bedd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -149,6 +149,18 @@ const struct rtw_table name ## _tbl = { \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
+#define GET_PHY_STAT_P1_RXEVM_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXEVM_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_CFO_TAIL_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_CFO_TAIL_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_RXSNR_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8))
#define REG_ANAPARLDO_POW_MAC 0x0029
#define BIT_LDOE25_PON BIT(0)
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
index e2dd4c766077..d102a2c27757 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -1762,53 +1762,53 @@ static const u32 rtw8822c_bb[] = {
RTW_DECL_TABLE_PHY_COND(rtw8822c_bb, rtw_phy_cfg_bb);
-static const u32 rtw8822c_bb_pg_type0[] = {
- 0, 0, 0, 0x00000c20, 0xffffffff, 0x484c5054,
- 0, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60,
- 0, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50,
- 0, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c,
- 0, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c,
- 0, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c,
- 0, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c,
- 0, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c,
- 0, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c,
- 0, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c,
- 0, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054,
- 0, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044,
- 0, 1, 0, 0x00000e20, 0xffffffff, 0x484c5054,
- 0, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60,
- 0, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50,
- 0, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c,
- 0, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c,
- 0, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c,
- 0, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c,
- 0, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c,
- 0, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c,
- 0, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c,
- 0, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054,
- 0, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044,
- 1, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60,
- 1, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50,
- 1, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c,
- 1, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c,
- 1, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c,
- 1, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c,
- 1, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c,
- 1, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c,
- 1, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c,
- 1, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054,
- 1, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044,
- 1, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60,
- 1, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50,
- 1, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c,
- 1, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c,
- 1, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c,
- 1, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c,
- 1, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c,
- 1, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c,
- 1, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c,
- 1, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054,
- 1, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044
+static const struct rtw_phy_pg_cfg_pair rtw8822c_bb_pg_type0[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x484c5054, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x484c5054, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044, },
};
RTW_DECL_TABLE_BB_PG(rtw8822c_bb_pg_type0);
diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c
index 48b9ed49b79a..9b90339ab697 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.c
+++ b/drivers/net/wireless/realtek/rtw88/rx.c
@@ -5,6 +5,7 @@
#include "main.h"
#include "rx.h"
#include "ps.h"
+#include "debug.h"
void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct sk_buff *skb)
@@ -25,8 +26,6 @@ void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtwvif = (struct rtw_vif *)vif->drv_priv;
rtwvif->stats.rx_unicast += skb->len;
rtwvif->stats.rx_cnt++;
- if (rtwvif->stats.rx_cnt > RTW_LPS_THRESHOLD)
- rtw_leave_lps_irqsafe(rtwdev, rtwvif);
}
}
}
@@ -39,6 +38,60 @@ struct rtw_rx_addr_match_data {
u8 *bssid;
};
+static void rtw_rx_phy_stat(struct rtw_dev *rtwdev,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_hdr *hdr)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_pkt_count *cur_pkt_cnt = &dm_info->cur_pkt_count;
+ u8 rate_ss, rate_ss_evm, evm_id;
+ u8 i, idx;
+
+ dm_info->curr_rx_rate = pkt_stat->rate;
+
+ if (ieee80211_is_beacon(hdr->frame_control))
+ cur_pkt_cnt->num_bcn_pkt++;
+
+ switch (pkt_stat->rate) {
+ case DESC_RATE1M...DESC_RATE11M:
+ goto pkt_num;
+ case DESC_RATE6M...DESC_RATE54M:
+ rate_ss = 0;
+ rate_ss_evm = 1;
+ evm_id = RTW_EVM_OFDM;
+ break;
+ case DESC_RATEMCS0...DESC_RATEMCS7:
+ case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT1SS_MCS9:
+ rate_ss = 1;
+ rate_ss_evm = 1;
+ evm_id = RTW_EVM_1SS;
+ break;
+ case DESC_RATEMCS8...DESC_RATEMCS15:
+ case DESC_RATEVHT2SS_MCS0...DESC_RATEVHT2SS_MCS9:
+ rate_ss = 2;
+ rate_ss_evm = 2;
+ evm_id = RTW_EVM_2SS_A;
+ break;
+ default:
+ rtw_warn(rtwdev, "unknown pkt rate = %d\n", pkt_stat->rate);
+ return;
+ }
+
+ for (i = 0; i < rate_ss_evm; i++) {
+ idx = evm_id + i;
+ ewma_evm_add(&dm_info->ewma_evm[idx],
+ dm_info->rx_evm_dbm[i]);
+ }
+
+ for (i = 0; i < rtwdev->hal.rf_path_num; i++) {
+ idx = RTW_SNR_OFDM_A + 4 * rate_ss + i;
+ ewma_snr_add(&dm_info->ewma_snr[idx],
+ dm_info->rx_snr[i]);
+ }
+pkt_num:
+ cur_pkt_cnt->num_qry_pkt[pkt_stat->rate]++;
+}
+
static void rtw_rx_addr_match_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -50,14 +103,16 @@ static void rtw_rx_addr_match_iter(void *data, u8 *mac,
struct rtw_rx_pkt_stat *pkt_stat = iter_data->pkt_stat;
u8 *bssid = iter_data->bssid;
- if (ether_addr_equal(vif->bss_conf.bssid, bssid) &&
- (ether_addr_equal(vif->addr, hdr->addr1) ||
- ieee80211_is_beacon(hdr->frame_control)))
- sta = ieee80211_find_sta_by_ifaddr(rtwdev->hw, hdr->addr2,
- vif->addr);
- else
+ if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
return;
+ if (!(ether_addr_equal(vif->addr, hdr->addr1) ||
+ ieee80211_is_beacon(hdr->frame_control)))
+ return;
+
+ rtw_rx_phy_stat(rtwdev, pkt_stat, hdr);
+ sta = ieee80211_find_sta_by_ifaddr(rtwdev->hw, hdr->addr2,
+ vif->addr);
if (!sta)
return;
@@ -105,35 +160,17 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
else if (pkt_stat->rate >= DESC_RATEMCS0)
rx_status->encoding = RX_ENC_HT;
- if (pkt_stat->rate >= DESC_RATEVHT1SS_MCS0 &&
- pkt_stat->rate <= DESC_RATEVHT1SS_MCS9) {
- rx_status->nss = 1;
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT1SS_MCS0;
- } else if (pkt_stat->rate >= DESC_RATEVHT2SS_MCS0 &&
- pkt_stat->rate <= DESC_RATEVHT2SS_MCS9) {
- rx_status->nss = 2;
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT2SS_MCS0;
- } else if (pkt_stat->rate >= DESC_RATEVHT3SS_MCS0 &&
- pkt_stat->rate <= DESC_RATEVHT3SS_MCS9) {
- rx_status->nss = 3;
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT3SS_MCS0;
- } else if (pkt_stat->rate >= DESC_RATEVHT4SS_MCS0 &&
- pkt_stat->rate <= DESC_RATEVHT4SS_MCS9) {
- rx_status->nss = 4;
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT4SS_MCS0;
- } else if (pkt_stat->rate >= DESC_RATEMCS0 &&
- pkt_stat->rate <= DESC_RATEMCS15) {
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEMCS0;
- } else if (rx_status->band == NL80211_BAND_5GHZ &&
- pkt_stat->rate >= DESC_RATE6M &&
- pkt_stat->rate <= DESC_RATE54M) {
+ if (rx_status->band == NL80211_BAND_5GHZ &&
+ pkt_stat->rate >= DESC_RATE6M &&
+ pkt_stat->rate <= DESC_RATE54M) {
rx_status->rate_idx = pkt_stat->rate - DESC_RATE6M;
} else if (rx_status->band == NL80211_BAND_2GHZ &&
pkt_stat->rate >= DESC_RATE1M &&
pkt_stat->rate <= DESC_RATE54M) {
rx_status->rate_idx = pkt_stat->rate - DESC_RATE1M;
- } else {
- rx_status->rate_idx = 0;
+ } else if (pkt_stat->rate >= DESC_RATEMCS0) {
+ rtw_desc_to_mcsrate(pkt_stat->rate, &rx_status->rate_idx,
+ &rx_status->nss);
}
rx_status->flag |= RX_FLAG_MACTIME_START;
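
The rtw_rx_phy_stat() hunk above feeds per-stream EVM and SNR samples into exponentially weighted moving averages through the driver's ewma_evm_add()/ewma_snr_add() helpers. As a rough standalone sketch of the fixed-point arithmetic behind linux/average.h-style helpers (the precision and weight constants below are illustrative assumptions, not the driver's actual DECLARE_EWMA() parameters):

#include <stdio.h>

#define EWMA_PREC 10	/* fractional bits of the stored average (assumed) */
#define EWMA_WLOG 2	/* log2 of the averaging weight, i.e. weight = 4 (assumed) */

struct ewma { unsigned long internal; };

/* avg' = (avg * (2^w - 1) + sample) / 2^w, kept in fixed point */
static void ewma_add(struct ewma *e, unsigned long val)
{
	unsigned long scaled = val << EWMA_PREC;

	e->internal = e->internal ?
		(((e->internal << EWMA_WLOG) - e->internal) + scaled) >> EWMA_WLOG :
		scaled;	/* the first sample seeds the average */
}

static unsigned long ewma_read(const struct ewma *e)
{
	return e->internal >> EWMA_PREC;
}

int main(void)
{
	struct ewma snr = { 0 };
	const unsigned long samples[] = { 30, 28, 31, 29 };

	for (unsigned int i = 0; i < 4; i++)
		ewma_add(&snr, samples[i]);
	printf("smoothed SNR ~ %lu dB\n", ewma_read(&snr));
	return 0;
}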
diff --git a/drivers/net/wireless/realtek/rtw88/rx.h b/drivers/net/wireless/realtek/rtw88/rx.h
index 383f3b2babc1..3342e3761281 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.h
+++ b/drivers/net/wireless/realtek/rtw88/rx.h
@@ -5,6 +5,15 @@
#ifndef __RTW_RX_H_
#define __RTW_RX_H_
+enum rtw_rx_desc_enc {
+ RX_DESC_ENC_NONE = 0,
+ RX_DESC_ENC_WEP40 = 1,
+ RX_DESC_ENC_TKIP_WO_MIC = 2,
+ RX_DESC_ENC_TKIP_MIC = 3,
+ RX_DESC_ENC_AES = 4,
+ RX_DESC_ENC_WEP104 = 5,
+};
+
#define GET_RX_DESC_PHYST(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(26))
#define GET_RX_DESC_ICV_ERR(rxdesc) \
@@ -21,6 +30,8 @@
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(19, 16))
#define GET_RX_DESC_SHIFT(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(25, 24))
+#define GET_RX_DESC_ENC_TYPE(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(22, 20))
#define GET_RX_DESC_RX_RATE(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x03), GENMASK(6, 0))
#define GET_RX_DESC_MACID(rxdesc) \
diff --git a/drivers/net/wireless/realtek/rtw88/sec.c b/drivers/net/wireless/realtek/rtw88/sec.c
index c594fc02804d..d0d7fbb10d58 100644
--- a/drivers/net/wireless/realtek/rtw88/sec.c
+++ b/drivers/net/wireless/realtek/rtw88/sec.c
@@ -96,6 +96,27 @@ void rtw_sec_clear_cam(struct rtw_dev *rtwdev,
rtw_write32(rtwdev, RTW_SEC_CMD_REG, command);
}
+u8 rtw_sec_cam_pg_backup(struct rtw_dev *rtwdev, u8 *used_cam)
+{
+ struct rtw_sec_desc *sec = &rtwdev->sec;
+ u8 offset = 0;
+ u8 count, n;
+
+ if (!used_cam)
+ return 0;
+
+ for (count = 0; count < MAX_PG_CAM_BACKUP_NUM; count++) {
+ n = find_next_bit(sec->cam_map, RTW_MAX_SEC_CAM_NUM, offset);
+ if (n == RTW_MAX_SEC_CAM_NUM)
+ break;
+
+ used_cam[count] = n;
+ offset = n + 1;
+ }
+
+ return count;
+}
+
void rtw_sec_enable_sec_engine(struct rtw_dev *rtwdev)
{
struct rtw_sec_desc *sec = &rtwdev->sec;
diff --git a/drivers/net/wireless/realtek/rtw88/sec.h b/drivers/net/wireless/realtek/rtw88/sec.h
index 8c50a895c797..efcf45433999 100644
--- a/drivers/net/wireless/realtek/rtw88/sec.h
+++ b/drivers/net/wireless/realtek/rtw88/sec.h
@@ -34,6 +34,7 @@ void rtw_sec_write_cam(struct rtw_dev *rtwdev,
void rtw_sec_clear_cam(struct rtw_dev *rtwdev,
struct rtw_sec_desc *sec,
u8 hw_key_idx);
+u8 rtw_sec_cam_pg_backup(struct rtw_dev *rtwdev, u8 *used_cam);
void rtw_sec_enable_sec_engine(struct rtw_dev *rtwdev);
#endif
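
The new rtw_sec_cam_pg_backup() walks the security-CAM bitmap with find_next_bit(), which returns the bitmap size once no further bit is set. A userspace sketch of the same walk follows; the bitmap width, backup limit, and sample map are illustrative stand-ins, not the driver's real RTW_MAX_SEC_CAM_NUM / MAX_PG_CAM_BACKUP_NUM values:

#include <stdint.h>
#include <stdio.h>

#define MAX_CAM 32	/* assumed bitmap width */

static unsigned int next_set_bit(uint32_t map, unsigned int from)
{
	for (unsigned int i = from; i < MAX_CAM; i++)
		if (map & (1u << i))
			return i;
	return MAX_CAM;	/* mirrors find_next_bit()'s "nothing found" result */
}

/* Collect the indices of up to 'max' set bits, lowest first. */
static unsigned int cam_backup(uint32_t map, uint8_t *out, unsigned int max)
{
	unsigned int count, n, offset = 0;

	for (count = 0; count < max; count++) {
		n = next_set_bit(map, offset);
		if (n == MAX_CAM)
			break;
		out[count] = n;
		offset = n + 1;	/* resume the search after the hit */
	}
	return count;
}

int main(void)
{
	uint8_t used[8];
	unsigned int n = cam_backup(0x00000146, used, 8); /* bits 1, 2, 6, 8 */

	for (unsigned int i = 0; i < n; i++)
		printf("CAM entry %u in use\n", used[i]);
	return 0;
}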
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index 8eaa9809ca44..24c39c60c99a 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -27,8 +27,6 @@ void rtw_tx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtwvif = (struct rtw_vif *)vif->drv_priv;
rtwvif->stats.tx_unicast += skb->len;
rtwvif->stats.tx_cnt++;
- if (rtwvif->stats.tx_cnt > RTW_LPS_THRESHOLD)
- rtw_leave_lps_irqsafe(rtwdev, rtwvif);
}
}
}
@@ -58,6 +56,7 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
SET_TX_DESC_DATA_SHORT(txdesc, pkt_info->short_gi);
SET_TX_DESC_SPE_RPT(txdesc, pkt_info->report);
SET_TX_DESC_SW_DEFINE(txdesc, pkt_info->sn);
+ SET_TX_DESC_USE_RTS(txdesc, pkt_info->rts);
}
EXPORT_SYMBOL(rtw_tx_fill_tx_desc);
@@ -260,6 +259,9 @@ static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
ampdu_density = get_tx_ampdu_density(sta);
}
+ if (info->control.use_rts)
+ pkt_info->rts = true;
+
if (sta->vht_cap.vht_supported)
rate = get_highest_vht_tx_rate(rtwdev, sta);
else if (sta->ht_cap.ht_supported)
@@ -365,3 +367,132 @@ void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
pkt_info->qsel = TX_DESC_QSEL_MGMT;
pkt_info->ls = true;
}
+
+void rtw_tx(struct rtw_dev *rtwdev,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct rtw_tx_pkt_info pkt_info = {0};
+
+ rtw_tx_pkt_info_update(rtwdev, &pkt_info, control, skb);
+ if (rtw_hci_tx(rtwdev, &pkt_info, skb))
+ goto out;
+
+ return;
+
+out:
+ ieee80211_free_txskb(rtwdev->hw, skb);
+}
+
+static void rtw_txq_check_agg(struct rtw_dev *rtwdev,
+ struct rtw_txq *rtwtxq,
+ struct sk_buff *skb)
+{
+ struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
+ struct ieee80211_tx_info *info;
+ struct rtw_sta_info *si;
+
+ if (test_bit(RTW_TXQ_AMPDU, &rtwtxq->flags)) {
+ info = IEEE80211_SKB_CB(skb);
+ info->flags |= IEEE80211_TX_CTL_AMPDU;
+ return;
+ }
+
+ if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
+ return;
+
+ if (test_bit(RTW_TXQ_BLOCK_BA, &rtwtxq->flags))
+ return;
+
+ if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ return;
+
+ if (!txq->sta)
+ return;
+
+ si = (struct rtw_sta_info *)txq->sta->drv_priv;
+ set_bit(txq->tid, si->tid_ba);
+
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->ba_work);
+}
+
+static bool rtw_txq_dequeue(struct rtw_dev *rtwdev,
+ struct rtw_txq *rtwtxq)
+{
+ struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
+ struct ieee80211_tx_control control;
+ struct sk_buff *skb;
+
+ skb = ieee80211_tx_dequeue(rtwdev->hw, txq);
+ if (!skb)
+ return false;
+
+ rtw_txq_check_agg(rtwdev, rtwtxq, skb);
+
+ control.sta = txq->sta;
+ rtw_tx(rtwdev, &control, skb);
+ rtwtxq->last_push = jiffies;
+
+ return true;
+}
+
+static void rtw_txq_push(struct rtw_dev *rtwdev,
+ struct rtw_txq *rtwtxq,
+ unsigned long frames)
+{
+ int i;
+
+ rcu_read_lock();
+
+ for (i = 0; i < frames; i++)
+ if (!rtw_txq_dequeue(rtwdev, rtwtxq))
+ break;
+
+ rcu_read_unlock();
+}
+
+void rtw_tx_tasklet(unsigned long data)
+{
+ struct rtw_dev *rtwdev = (void *)data;
+ struct rtw_txq *rtwtxq, *tmp;
+
+ spin_lock_bh(&rtwdev->txq_lock);
+
+ list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->txqs, list) {
+ struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
+ unsigned long frame_cnt;
+ unsigned long byte_cnt;
+
+ ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
+ rtw_txq_push(rtwdev, rtwtxq, frame_cnt);
+
+ list_del_init(&rtwtxq->list);
+ }
+
+ spin_unlock_bh(&rtwdev->txq_lock);
+}
+
+void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq)
+{
+ struct rtw_txq *rtwtxq;
+
+ if (!txq)
+ return;
+
+ rtwtxq = (struct rtw_txq *)txq->drv_priv;
+ INIT_LIST_HEAD(&rtwtxq->list);
+}
+
+void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq)
+{
+ struct rtw_txq *rtwtxq;
+
+ if (!txq)
+ return;
+
+ rtwtxq = (struct rtw_txq *)txq->drv_priv;
+ spin_lock_bh(&rtwdev->txq_lock);
+ if (!list_empty(&rtwtxq->list))
+ list_del_init(&rtwtxq->list);
+ spin_unlock_bh(&rtwdev->txq_lock);
+}
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index 8338dbf55576..9ca4f74a501b 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -35,6 +35,8 @@
le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, GENMASK(23, 12))
#define SET_TX_DESC_MAX_AGG_NUM(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(21, 17))
+#define SET_TX_DESC_USE_RTS(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, BIT(12))
#define SET_TX_DESC_AMPDU_DENSITY(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, GENMASK(22, 20))
#define SET_TX_DESC_DATA_STBC(txdesc, value) \
@@ -75,6 +77,12 @@ enum rtw_tx_desc_queue_select {
TX_DESC_QSEL_H2C = 19,
};
+void rtw_tx(struct rtw_dev *rtwdev,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
+void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
+void rtw_tx_tasklet(unsigned long data);
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct ieee80211_tx_control *control,
diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c
index 212070c2baa8..10f1117c0cfb 100644
--- a/drivers/net/wireless/realtek/rtw88/util.c
+++ b/drivers/net/wireless/realtek/rtw88/util.c
@@ -70,3 +70,30 @@ void rtw_restore_reg(struct rtw_dev *rtwdev,
}
}
}
+
+void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
+{
+ if (rate <= DESC_RATE54M)
+ return;
+
+ if (rate >= DESC_RATEVHT1SS_MCS0 &&
+ rate <= DESC_RATEVHT1SS_MCS9) {
+ *nss = 1;
+ *mcs = rate - DESC_RATEVHT1SS_MCS0;
+ } else if (rate >= DESC_RATEVHT2SS_MCS0 &&
+ rate <= DESC_RATEVHT2SS_MCS9) {
+ *nss = 2;
+ *mcs = rate - DESC_RATEVHT2SS_MCS0;
+ } else if (rate >= DESC_RATEVHT3SS_MCS0 &&
+ rate <= DESC_RATEVHT3SS_MCS9) {
+ *nss = 3;
+ *mcs = rate - DESC_RATEVHT3SS_MCS0;
+ } else if (rate >= DESC_RATEVHT4SS_MCS0 &&
+ rate <= DESC_RATEVHT4SS_MCS9) {
+ *nss = 4;
+ *mcs = rate - DESC_RATEVHT4SS_MCS0;
+ } else if (rate >= DESC_RATEMCS0 &&
+ rate <= DESC_RATEMCS15) {
+ *mcs = rate - DESC_RATEMCS0;
+ }
+}
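
rtw_desc_to_mcsrate() works because each VHT spatial-stream group occupies a contiguous run of ten rate codes, so nss and mcs fall out of a base/offset computation. The if/else chain above spells the groups out; a compact sketch of the underlying pattern, using made-up rate numbering (the VHT1SS_BASE and group constants are stand-ins, not the real DESC_RATE* values):

#include <stdio.h>

#define VHT1SS_BASE 44	/* assumed first VHT 1SS rate code */
#define VHT_GROUPS 4	/* 1SS..4SS */
#define VHT_MCS_PER_GROUP 10

static int desc_to_mcs(unsigned int rate, unsigned int *mcs, unsigned int *nss)
{
	unsigned int rel;

	if (rate < VHT1SS_BASE ||
	    rate >= VHT1SS_BASE + VHT_GROUPS * VHT_MCS_PER_GROUP)
		return -1;	/* not a VHT rate code */

	rel = rate - VHT1SS_BASE;
	*nss = rel / VHT_MCS_PER_GROUP + 1;	/* stream groups are 1-based */
	*mcs = rel % VHT_MCS_PER_GROUP;
	return 0;
}

int main(void)
{
	unsigned int mcs, nss;

	if (!desc_to_mcs(VHT1SS_BASE + 13, &mcs, &nss))	/* 2SS MCS3 */
		printf("nss=%u mcs=%u\n", nss, mcs);
	return 0;
}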
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 6c7f26ef6476..9cc8a335d519 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1756,6 +1756,7 @@ static int rsi_send_beacon(struct rsi_common *common)
skb_pull(skb, (64 - dword_align_bytes));
if (rsi_prepare_beacon(common, skb)) {
rsi_dbg(ERR_ZONE, "Failed to prepare beacon\n");
+ dev_kfree_skb(skb);
return -EINVAL;
}
skb_queue_tail(&common->tx_queue[MGMT_BEACON_Q], skb);
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 760eaffeebd6..53f41fc2cadf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -785,10 +785,10 @@ static int rsi_probe(struct usb_interface *pfunction,
rsi_dbg(ERR_ZONE, "%s: Initialized os intf ops\n", __func__);
- if (id && id->idProduct == RSI_USB_PID_9113) {
+ if (id->idProduct == RSI_USB_PID_9113) {
rsi_dbg(INIT_ZONE, "%s: 9113 module detected\n", __func__);
adapter->device_model = RSI_DEV_9113;
- } else if (id && id->idProduct == RSI_USB_PID_9116) {
+ } else if (id->idProduct == RSI_USB_PID_9116) {
rsi_dbg(INIT_ZONE, "%s: 9116 module detected\n", __func__);
adapter->device_model = RSI_DEV_9116;
} else {
diff --git a/drivers/net/wireless/st/cw1200/fwio.c b/drivers/net/wireless/st/cw1200/fwio.c
index 6574e78e05ea..2a03dc533b6a 100644
--- a/drivers/net/wireless/st/cw1200/fwio.c
+++ b/drivers/net/wireless/st/cw1200/fwio.c
@@ -320,12 +320,12 @@ int cw1200_load_firmware(struct cw1200_common *priv)
goto out;
}
- priv->hw_type = cw1200_get_hw_type(val32, &major_revision);
- if (priv->hw_type < 0) {
+ ret = cw1200_get_hw_type(val32, &major_revision);
+ if (ret < 0) {
pr_err("Can't deduce hardware type.\n");
- ret = -ENOTSUPP;
goto out;
}
+ priv->hw_type = ret;
/* Set DPLL Reg value, and read back to confirm writes work */
ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
diff --git a/drivers/net/wireless/st/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c
index 14133eedb3b6..12952b1c29df 100644
--- a/drivers/net/wireless/st/cw1200/queue.c
+++ b/drivers/net/wireless/st/cw1200/queue.c
@@ -79,10 +79,9 @@ static void cw1200_queue_register_post_gc(struct list_head *gc_list,
struct cw1200_queue_item *item)
{
struct cw1200_queue_item *gc_item;
- gc_item = kmalloc(sizeof(struct cw1200_queue_item),
+ gc_item = kmemdup(item, sizeof(struct cw1200_queue_item),
GFP_ATOMIC);
BUG_ON(!gc_item);
- memcpy(gc_item, item, sizeof(struct cw1200_queue_item));
list_add_tail(&gc_item->head, gc_list);
}
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index c46b044b7f7b..988581cc134b 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -120,8 +120,7 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
++priv->scan.n_ssids;
}
- if (frame.skb)
- dev_kfree_skb(frame.skb);
+ dev_kfree_skb(frame.skb);
mutex_unlock(&priv->conf_mutex);
queue_work(priv->workqueue, &priv->scan.work);
return 0;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 547ad538d8b6..e994995d79ab 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -544,11 +544,6 @@ static int wlcore_irq_locked(struct wl1271 *wl)
}
while (!done && loopcount--) {
- /*
- * In order to avoid a race with the hardirq, clear the flag
- * before acknowledging the chip.
- */
- clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_atomic();
ret = wlcore_fw_status(wl, wl->fw_status);
@@ -668,7 +663,7 @@ static irqreturn_t wlcore_irq(int irq, void *cookie)
disable_irq_nosync(wl->irq);
pm_wakeup_event(wl->dev, 0);
spin_unlock_irqrestore(&wl->wl_lock, flags);
- return IRQ_HANDLED;
+ goto out_handled;
}
spin_unlock_irqrestore(&wl->wl_lock, flags);
@@ -692,6 +687,11 @@ static irqreturn_t wlcore_irq(int irq, void *cookie)
mutex_unlock(&wl->mutex);
+out_handled:
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
return IRQ_HANDLED;
}
@@ -1434,7 +1434,7 @@ int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
field = &filter->fields[filter->num_fields];
- field->pattern = kzalloc(len, GFP_KERNEL);
+ field->pattern = kmemdup(pattern, len, GFP_KERNEL);
if (!field->pattern) {
wl1271_warning("Failed to allocate RX filter pattern");
return -ENOMEM;
@@ -1445,7 +1445,6 @@ int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
field->offset = cpu_to_le16(offset);
field->flags = flags;
field->len = len;
- memcpy(field->pattern, pattern, len);
return 0;
}
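
Both the cw1200 queue.c hunk and the wl1271_rx_filter_alloc_field() hunk above collapse an allocate-then-copy pair into kmemdup(), which drops a line of code and removes the window in which the buffer exists but is not yet populated. A userspace analog of the helper, for illustration only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace analog of kmemdup(); the kernel helper additionally takes
 * a gfp_t allocation-flags argument (e.g. GFP_KERNEL or GFP_ATOMIC). */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const unsigned char pattern[] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char *copy = memdup(pattern, sizeof(pattern));

	if (!copy)
		return 1;
	printf("first byte: %02x\n", copy[0]);
	free(copy);
	return 0;
}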
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 1cd113c8d7cb..ad0abb1f0bae 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -259,7 +259,7 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
*fw_vsc_cfg, len);
if (r) {
- devm_kfree(dev, fw_vsc_cfg);
+ devm_kfree(dev, *fw_vsc_cfg);
goto vsc_read_err;
}
} else {
diff --git a/drivers/nfc/nfcmrvl/Kconfig b/drivers/nfc/nfcmrvl/Kconfig
index 06f34fb4e0b0..ded0d03c0015 100644
--- a/drivers/nfc/nfcmrvl/Kconfig
+++ b/drivers/nfc/nfcmrvl/Kconfig
@@ -15,7 +15,7 @@ config NFC_MRVL_USB
Marvell NFC-over-USB driver.
This driver provides support for Marvell NFC-over-USB devices:
- 8897.
+ 8897.
Say Y here to compile support for Marvell NFC-over-USB driver
into the kernel or say M to compile it as module.
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 307bd2afbe05..4d1909aecd6c 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -220,8 +220,10 @@ static irqreturn_t nxp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
if (r == -EREMOTEIO) {
phy->hard_fault = r;
- skb = NULL;
- } else if (r < 0) {
+ if (info->mode == NXP_NCI_MODE_FW)
+ nxp_nci_fw_recv_frame(phy->ndev, NULL);
+ }
+ if (r < 0) {
nfc_err(&client->dev, "Read failed with error %d\n", r);
goto exit_irq_handled;
}
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index aa766e7ece70..346e084387f7 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -2643,13 +2643,17 @@ static int pn532_sam_configuration(struct nfc_dev *nfc_dev)
static int pn533_dev_up(struct nfc_dev *nfc_dev)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ int rc;
- if (dev->phy_ops->dev_up)
- dev->phy_ops->dev_up(dev);
+ if (dev->phy_ops->dev_up) {
+ rc = dev->phy_ops->dev_up(dev);
+ if (rc)
+ return rc;
+ }
if ((dev->device_type == PN533_DEVICE_PN532) ||
(dev->device_type == PN533_DEVICE_PN532_AUTOPOLL)) {
- int rc = pn532_sam_configuration(nfc_dev);
+ rc = pn532_sam_configuration(nfc_dev);
if (rc)
return rc;
@@ -2665,7 +2669,7 @@ static int pn533_dev_down(struct nfc_dev *nfc_dev)
ret = pn533_rf_field(nfc_dev, 0);
if (dev->phy_ops->dev_down && !ret)
- dev->phy_ops->dev_down(dev);
+ ret = dev->phy_ops->dev_down(dev);
return ret;
}
diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h
index b66f02a53167..5f94f38a2a08 100644
--- a/drivers/nfc/pn533/pn533.h
+++ b/drivers/nfc/pn533/pn533.h
@@ -224,8 +224,8 @@ struct pn533_phy_ops {
 * bring up its interface to the chip and have it suspended for power
* saving reasons otherwise.
*/
- void (*dev_up)(struct pn533 *priv);
- void (*dev_down)(struct pn533 *priv);
+ int (*dev_up)(struct pn533 *priv);
+ int (*dev_down)(struct pn533 *priv);
};
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index 46e5ff16f699..a0665d8ea85b 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -100,20 +100,27 @@ static void pn532_uart_abort_cmd(struct pn533 *dev, gfp_t flags)
pn533_recv_frame(dev, NULL, -ENOENT);
}
-static void pn532_dev_up(struct pn533 *dev)
+static int pn532_dev_up(struct pn533 *dev)
{
struct pn532_uart_phy *pn532 = dev->phy;
+ int ret = 0;
+
+ ret = serdev_device_open(pn532->serdev);
+ if (ret)
+ return ret;
- serdev_device_open(pn532->serdev);
pn532->send_wakeup = PN532_SEND_LAST_WAKEUP;
+ return ret;
}
-static void pn532_dev_down(struct pn533 *dev)
+static int pn532_dev_down(struct pn533 *dev)
{
struct pn532_uart_phy *pn532 = dev->phy;
serdev_device_close(pn532->serdev);
pn532->send_wakeup = PN532_SEND_WAKEUP;
+
+ return 0;
}
static struct pn533_phy_ops uart_phy_ops = {
diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c
index f9ac176cf257..2ce17932a073 100644
--- a/drivers/nfc/st21nfca/core.c
+++ b/drivers/nfc/st21nfca/core.c
@@ -708,6 +708,7 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
NFC_PROTO_FELICA_MASK;
} else {
kfree_skb(nfcid_skb);
+ nfcid_skb = NULL;
/* P2P in type A */
r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE,
ST21NFCA_RF_READER_F_NFCID1,
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index fc99a40c1ec4..e0f064dcbd02 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -158,9 +158,11 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
struct nvme_ns *ns;
mutex_lock(&ctrl->scan_lock);
+ down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
if (nvme_mpath_clear_current_path(ns))
kblockd_schedule_work(&ns->head->requeue_work);
+ up_read(&ctrl->namespaces_rwsem);
mutex_unlock(&ctrl->scan_lock);
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f19a28b4e997..cb4c3000a57e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2133,8 +2133,16 @@ err_unreg_client:
static void __exit nvme_rdma_cleanup_module(void)
{
+ struct nvme_rdma_ctrl *ctrl;
+
nvmf_unregister_transport(&nvme_rdma_transport);
ib_unregister_client(&nvme_rdma_ib_client);
+
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
+ nvme_delete_ctrl(&ctrl->ctrl);
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+ flush_workqueue(nvme_delete_wq);
}
module_init(nvme_rdma_init_module);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index bd6129db6417..c6b87ce2b0cc 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -361,8 +361,8 @@ struct phy_device *of_phy_get_and_connect(struct net_device *dev,
struct phy_device *phy;
int ret;
- iface = of_get_phy_mode(np);
- if ((int)iface < 0)
+ ret = of_get_phy_mode(np, &iface);
+ if (ret)
return NULL;
if (of_phy_is_fixed_link(np)) {
ret = of_phy_register_fixed_link(np);
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index b02734aff8c1..6e411821583e 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -15,16 +15,20 @@
/**
* of_get_phy_mode - Get phy mode for given device_node
* @np: Pointer to the given device_node
+ * @interface: Pointer to the result
*
* The function gets phy interface string from property 'phy-mode' or
- * 'phy-connection-type', and return its index in phy_modes table, or errno in
- * error case.
+ * 'phy-connection-type'. On success, the index into the phy_modes table
+ * is stored in @interface and 0 is returned. On error, @interface is set
+ * to PHY_INTERFACE_MODE_NA and an errno such as -ENODEV is returned.
*/
-int of_get_phy_mode(struct device_node *np)
+int of_get_phy_mode(struct device_node *np, phy_interface_t *interface)
{
const char *pm;
int err, i;
+ *interface = PHY_INTERFACE_MODE_NA;
+
err = of_property_read_string(np, "phy-mode", &pm);
if (err < 0)
err = of_property_read_string(np, "phy-connection-type", &pm);
@@ -32,8 +36,10 @@ int of_get_phy_mode(struct device_node *np)
return err;
for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
- if (!strcasecmp(pm, phy_modes(i)))
- return i;
+ if (!strcasecmp(pm, phy_modes(i))) {
+ *interface = i;
+ return 0;
+ }
return -ENODEV;
}
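
The reworked of_get_phy_mode() turns the old in-band error return (callers had to cast to int and test for negative values, as the of_phy_get_and_connect() hunk above shows) into an explicit errno plus an always-initialized output parameter. A self-contained mock of the new contract; the lookup table and helper name are stand-ins for illustration:

#include <errno.h>
#include <stdio.h>
#include <strings.h>

typedef enum {
	PHY_INTERFACE_MODE_NA,
	PHY_INTERFACE_MODE_MII,
	PHY_INTERFACE_MODE_RGMII,
	PHY_INTERFACE_MODE_MAX,
} phy_interface_t;

static const char * const phy_modes[] = { "", "mii", "rgmii" };

/* Mock of the reworked lookup: errno on failure, result through an
 * output parameter that is always initialized, as in the hunk above. */
static int get_phy_mode(const char *pm, phy_interface_t *interface)
{
	*interface = PHY_INTERFACE_MODE_NA;

	for (int i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
		if (pm && !strcasecmp(pm, phy_modes[i])) {
			*interface = (phy_interface_t)i;
			return 0;
		}
	return -ENODEV;
}

int main(void)
{
	phy_interface_t iface;
	int err = get_phy_mode("rgmii", &iface);

	printf("err=%d iface=%d\n", err, iface);
	return 0;
}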
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index c3fa1840f8de..174888609779 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -90,8 +90,8 @@ config TWL4030_USB
config PHY_TI_GMII_SEL
tristate
- default y if TI_CPSW=y
- depends on TI_CPSW || COMPILE_TEST
+ default y if TI_CPSW=y || TI_CPSW_SWITCHDEV=y
+ depends on TI_CPSW || TI_CPSW_SWITCHDEV || COMPILE_TEST
select GENERIC_PHY
select REGMAP
default m
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index c6251eac8946..2c419fa5d1c1 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -147,6 +147,7 @@ struct chv_pin_context {
* @pctldesc: Pin controller description
* @pctldev: Pointer to the pin controller device
* @chip: GPIO chip in this pin controller
+ * @irqchip: IRQ chip in this pin controller
* @regs: MMIO registers
* @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
* offset (in GPIO number space)
@@ -162,6 +163,7 @@ struct chv_pinctrl {
struct pinctrl_desc pctldesc;
struct pinctrl_dev *pctldev;
struct gpio_chip chip;
+ struct irq_chip irqchip;
void __iomem *regs;
unsigned intr_lines[16];
const struct chv_community *community;
@@ -1466,16 +1468,6 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip chv_gpio_irqchip = {
- .name = "chv-gpio",
- .irq_startup = chv_gpio_irq_startup,
- .irq_ack = chv_gpio_irq_ack,
- .irq_mask = chv_gpio_irq_mask,
- .irq_unmask = chv_gpio_irq_unmask,
- .irq_set_type = chv_gpio_irq_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
static void chv_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -1559,7 +1551,7 @@ static void chv_init_irq_valid_mask(struct gpio_chip *chip,
intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
if (intsel >= community->nirqs)
- clear_bit(i, valid_mask);
+ clear_bit(desc->number, valid_mask);
}
}
@@ -1625,7 +1617,15 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
}
}
- ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
+ pctrl->irqchip.name = "chv-gpio";
+ pctrl->irqchip.irq_startup = chv_gpio_irq_startup;
+ pctrl->irqchip.irq_ack = chv_gpio_irq_ack;
+ pctrl->irqchip.irq_mask = chv_gpio_irq_mask;
+ pctrl->irqchip.irq_unmask = chv_gpio_irq_unmask;
+ pctrl->irqchip.irq_set_type = chv_gpio_irq_type;
+ pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE;
+
+ ret = gpiochip_irqchip_add(chip, &pctrl->irqchip, 0,
handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add IRQ chip\n");
@@ -1642,7 +1642,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
}
}
- gpiochip_set_chained_irqchip(chip, &chv_gpio_irqchip, irq,
+ gpiochip_set_chained_irqchip(chip, &pctrl->irqchip, irq,
chv_gpio_irq_handler);
return 0;
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index bc013599a9a3..83981ad66a71 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -52,6 +52,7 @@
#define PADCFG0_GPIROUTNMI BIT(17)
#define PADCFG0_PMODE_SHIFT 10
#define PADCFG0_PMODE_MASK GENMASK(13, 10)
+#define PADCFG0_PMODE_GPIO 0
#define PADCFG0_GPIORXDIS BIT(9)
#define PADCFG0_GPIOTXDIS BIT(8)
#define PADCFG0_GPIORXSTATE BIT(1)
@@ -332,7 +333,7 @@ static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
cfg1 = readl(intel_get_padcfg(pctrl, pin, PADCFG1));
mode = (cfg0 & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
- if (!mode)
+ if (mode == PADCFG0_PMODE_GPIO)
seq_puts(s, "GPIO ");
else
seq_printf(s, "mode %d ", mode);
@@ -458,6 +459,11 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
writel(value, padcfg0);
}
+static int intel_gpio_get_gpio_mode(void __iomem *padcfg0)
+{
+ return (readl(padcfg0) & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
+}
+
static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
{
u32 value;
@@ -491,7 +497,20 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
}
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
+
+ /*
+	 * If the pin is already configured in GPIO mode, we assume that the
+	 * firmware provides correct settings. In that case we avoid
+	 * potential glitches on the pin. Otherwise, for a pin in
+	 * alternative mode, the consumer has to supply the respective flags.
+ */
+ if (intel_gpio_get_gpio_mode(padcfg0) == PADCFG0_PMODE_GPIO) {
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ return 0;
+ }
+
intel_gpio_set_gpio_mode(padcfg0);
+
/* Disable TX buffer and enable RX (this will be input) */
__intel_gpio_set_direction(padcfg0, true);
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 564660028fcc..ccdf0bb21414 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -585,19 +585,6 @@ static int stmfx_pinctrl_gpio_function_enable(struct stmfx_pinctrl *pctl)
return stmfx_function_enable(pctl->stmfx, func);
}
-static int stmfx_pinctrl_gpio_init_valid_mask(struct gpio_chip *gc,
- unsigned long *valid_mask,
- unsigned int ngpios)
-{
- struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
- u32 n;
-
- for_each_clear_bit(n, &pctl->gpio_valid_mask, ngpios)
- clear_bit(n, valid_mask);
-
- return 0;
-}
-
static int stmfx_pinctrl_probe(struct platform_device *pdev)
{
struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
@@ -660,7 +647,6 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
pctl->gpio_chip.ngpio = pctl->pctl_desc.npins;
pctl->gpio_chip.can_sleep = true;
pctl->gpio_chip.of_node = np;
- pctl->gpio_chip.init_valid_mask = stmfx_pinctrl_gpio_init_valid_mask;
ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl);
if (ret) {
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index c48ad23cfa3a..b45d2b86d8ca 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -121,7 +121,7 @@ config PTP_1588_CLOCK_KVM
config PTP_1588_CLOCK_IDTCM
tristate "IDT CLOCKMATRIX as PTP clock"
- select PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK
default n
help
This driver adds support for using IDT CLOCKMATRIX(TM) as a PTP
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 67d0199840fd..9d72ab593f13 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -149,11 +149,21 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EFAULT;
break;
}
- if (((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
- req.extts.rsv[0] || req.extts.rsv[1]) &&
- cmd == PTP_EXTTS_REQUEST2) {
- err = -EINVAL;
- break;
+ if (cmd == PTP_EXTTS_REQUEST2) {
+ /* Tell the drivers to check the flags carefully. */
+ req.extts.flags |= PTP_STRICT_FLAGS;
+ /* Make sure no reserved bit is set. */
+ if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
+ req.extts.rsv[0] || req.extts.rsv[1]) {
+ err = -EINVAL;
+ break;
+ }
+ /* Ensure one of the rising/falling edge bits is set. */
+ if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
+ (req.extts.flags & PTP_EXTTS_EDGES) == 0) {
+ err = -EINVAL;
+ break;
+ }
} else if (cmd == PTP_EXTTS_REQUEST) {
req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
req.extts.rsv[0] = 0;
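
With the checks above, a PTP_EXTTS_REQUEST2 caller must zero the reserved words, avoid undefined flag bits, and select at least one edge when enabling the feature; PTP_STRICT_FLAGS additionally tells drivers to reject flag combinations they cannot honor exactly. A minimal userspace request that passes the new validation (the device path is an example):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	int fd = open("/dev/ptp0", O_RDWR);	/* example device path */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));		/* rsv[] must be zero */
	req.index = 0;
	/* Enabling without a rising/falling bit would now be -EINVAL. */
	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;

	if (ioctl(fd, PTP_EXTTS_REQUEST2, &req)) {
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}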
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index cf5889b7d825..a5110b7b4ece 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -1294,8 +1294,10 @@ static int idtcm_probe(struct i2c_client *client,
err = set_tod_write_overhead(idtcm);
- if (err)
+ if (err) {
+ mutex_unlock(&idtcm->reg_lock);
return err;
+ }
err = idtcm_load_firmware(idtcm, &client->dev);
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 6ad51aa60c03..f877e77d9184 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -472,14 +472,7 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
if (err)
return err;
- /*
- * .apply might have to round some values in *state, if possible
- * read the actually implemented value back.
- */
- if (chip->ops->get_state)
- chip->ops->get_state(chip, pwm, &pwm->state);
- else
- pwm->state = *state;
+ pwm->state = *state;
} else {
/*
* FIXME: restore the initial state in case of error.
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index 56c38cfae92c..1f829edd8ee7 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -187,6 +187,7 @@ static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops iproc_pwm_ops = {
.apply = iproc_pwmc_apply,
.get_state = iproc_pwmc_get_state,
+ .owner = THIS_MODULE,
};
static int iproc_pwmc_probe(struct platform_device *pdev)
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 213ff40dda11..3c9a64c1b7a8 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -76,7 +76,6 @@ static const char *rcdev_name(struct reset_controller_dev *rcdev)
* of_reset_simple_xlate - translate reset_spec to the reset line number
* @rcdev: a pointer to the reset controller device
* @reset_spec: reset line specifier as found in the device tree
- * @flags: a flags pointer to fill in (optional)
*
* This simple translation function should be used for reset controllers
* with 1:1 mapping, where reset lines can be indexed by number without gaps.
@@ -748,6 +747,7 @@ static void reset_control_array_put(struct reset_control_array *resets)
for (i = 0; i < resets->num_rstcs; i++)
__reset_control_put_internal(resets->rstc[i]);
mutex_unlock(&reset_list_mutex);
+ kfree(resets);
}
/**
@@ -825,9 +825,10 @@ int __device_reset(struct device *dev, bool optional)
}
EXPORT_SYMBOL_GPL(__device_reset);
-/**
+/*
* APIs to manage an array of reset controls.
*/
+
/**
* of_reset_control_get_count - Count number of resets available with a device
*
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 66eac2b9704d..1901e9c80ed8 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -32,8 +32,6 @@
#define ISM_UNREG_SBA 0x11
#define ISM_UNREG_IEQ 0x12
-#define ISM_ERROR 0xFFFF
-
struct ism_req_hdr {
u32 cmd;
u16 : 16;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d08154502b15..f81f1523d528 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -368,6 +368,7 @@ enum qeth_header_ids {
QETH_HEADER_TYPE_L3_TSO = 0x03,
QETH_HEADER_TYPE_OSN = 0x04,
QETH_HEADER_TYPE_L2_TSO = 0x06,
+ QETH_HEADER_MASK_INVAL = 0x80,
};
/* flags for qeth_hdr.ext_flags */
#define QETH_HDR_EXT_VLAN_FRAME 0x01
@@ -477,12 +478,16 @@ struct qeth_card_stats {
u64 rx_sg_frags;
u64 rx_sg_alloc_page;
+ u64 rx_dropped_nomem;
+ u64 rx_dropped_notsupp;
+
/* rtnl_link_stats64 */
u64 rx_packets;
u64 rx_bytes;
- u64 rx_errors;
- u64 rx_dropped;
u64 rx_multicast;
+ u64 rx_length_errors;
+ u64 rx_frame_errors;
+ u64 rx_fifo_errors;
};
struct qeth_out_q_stats {
@@ -819,7 +824,6 @@ struct qeth_card {
struct workqueue_struct *event_wq;
struct workqueue_struct *cmd_wq;
wait_queue_head_t wait_q;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
struct mutex ip_lock;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 9e8bd8e08146..f1f56e354516 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1956,6 +1956,7 @@ static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
ccw_device_get_id(CARD_DDEV(card), &dev_id);
iob->finalize = qeth_idx_finalize_cmd;
+ port |= QETH_IDX_ACT_INVAL_FRAME;
memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
@@ -3093,7 +3094,7 @@ static int qeth_check_qdio_errors(struct qeth_card *card,
buf->element[14].sflags);
QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
if ((buf->element[15].sflags) == 0x12) {
- QETH_CARD_STAT_INC(card, rx_dropped);
+ QETH_CARD_STAT_INC(card, rx_fifo_errors);
return 0;
} else
return 1;
@@ -4346,7 +4347,9 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
case MII_NWAYTEST: /* N-way auto-neg test register */
break;
case MII_RERRCOUNTER: /* rx error counter */
- rc = card->stats.rx_errors;
+ rc = card->stats.rx_length_errors +
+ card->stats.rx_frame_errors +
+ card->stats.rx_fifo_errors;
break;
case MII_SREVISION: /* silicon revision */
break;
@@ -4852,7 +4855,6 @@ static void qeth_core_free_card(struct qeth_card *card)
qeth_clean_channel(&card->data);
qeth_put_cmd(card->read_cmd);
destroy_workqueue(card->event_wq);
- qeth_free_qdio_queues(card);
unregister_service_level(&card->qeth_service_level);
dev_set_drvdata(&card->gdev->dev, NULL);
kfree(card);
@@ -5062,13 +5064,14 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
struct qdio_buffer_element *element = *__element;
struct qdio_buffer *buffer = qethbuffer->buffer;
int offset = *__offset;
+ bool use_rx_sg = false;
+ unsigned int headroom;
struct sk_buff *skb;
int skb_len = 0;
void *data_ptr;
int data_len;
- int headroom = 0;
- int use_rx_sg = 0;
+next_packet:
/* qeth_hdr must not cross element boundaries */
while (element->length < offset + sizeof(struct qeth_hdr)) {
if (qeth_is_last_sbale(element))
@@ -5082,27 +5085,45 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
switch ((*hdr)->hdr.l2.id) {
case QETH_HEADER_TYPE_LAYER2:
skb_len = (*hdr)->hdr.l2.pkt_length;
+ headroom = 0;
break;
case QETH_HEADER_TYPE_LAYER3:
skb_len = (*hdr)->hdr.l3.length;
+ if (!IS_LAYER3(card)) {
+ QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
+ skb = NULL;
+ goto walk_packet;
+ }
+
headroom = ETH_HLEN;
break;
case QETH_HEADER_TYPE_OSN:
skb_len = (*hdr)->hdr.osn.pdu_length;
+ if (!IS_OSN(card)) {
+ QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
+ skb = NULL;
+ goto walk_packet;
+ }
+
headroom = sizeof(struct qeth_hdr);
break;
default:
- break;
+ if ((*hdr)->hdr.l2.id & QETH_HEADER_MASK_INVAL)
+ QETH_CARD_STAT_INC(card, rx_frame_errors);
+ else
+ QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
+
+ /* Can't determine packet length, drop the whole buffer. */
+ return NULL;
}
if (!skb_len)
return NULL;
- if (((skb_len >= card->options.rx_sg_cb) &&
- !IS_OSN(card) &&
- (!atomic_read(&card->force_alloc_skb))) ||
- (card->options.cq == QETH_CQ_ENABLED))
- use_rx_sg = 1;
+ use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
+ ((skb_len >= card->options.rx_sg_cb) &&
+ !atomic_read(&card->force_alloc_skb) &&
+ !IS_OSN(card));
if (use_rx_sg && qethbuffer->rx_skb) {
/* QETH_CQ_ENABLED only: */
@@ -5113,15 +5134,18 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
skb = napi_alloc_skb(&card->napi, linear + headroom);
}
+
if (!skb)
- goto no_mem;
- if (headroom)
+ QETH_CARD_STAT_INC(card, rx_dropped_nomem);
+ else if (headroom)
skb_reserve(skb, headroom);
+walk_packet:
data_ptr = element->addr + offset;
while (skb_len) {
data_len = min(skb_len, (int)(element->length - offset));
- if (data_len) {
+
+ if (skb && data_len) {
if (use_rx_sg)
qeth_create_skb_frag(element, skb, offset,
data_len);
@@ -5133,8 +5157,11 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
if (qeth_is_last_sbale(element)) {
QETH_CARD_TEXT(card, 4, "unexeob");
QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
- dev_kfree_skb_any(skb);
- QETH_CARD_STAT_INC(card, rx_errors);
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ QETH_CARD_STAT_INC(card,
+ rx_length_errors);
+ }
return NULL;
}
element++;
@@ -5144,6 +5171,11 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
offset += data_len;
}
}
+
+ /* This packet was skipped, go get another one: */
+ if (!skb)
+ goto next_packet;
+
*__element = element;
*__offset = offset;
if (use_rx_sg) {
@@ -5152,12 +5184,6 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
skb_shinfo(skb)->nr_frags);
}
return skb;
-no_mem:
- if (net_ratelimit()) {
- QETH_CARD_TEXT(card, 2, "noskbmem");
- }
- QETH_CARD_STAT_INC(card, rx_dropped);
- return NULL;
}
EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
@@ -5741,6 +5767,8 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
qeth_core_free_discipline(card);
}
+ qeth_free_qdio_queues(card);
+
free_netdev(card->dev);
qeth_core_free_card(card);
put_device(&gdev->dev);
@@ -6236,9 +6264,15 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_packets = card->stats.rx_packets;
stats->rx_bytes = card->stats.rx_bytes;
- stats->rx_errors = card->stats.rx_errors;
- stats->rx_dropped = card->stats.rx_dropped;
+ stats->rx_errors = card->stats.rx_length_errors +
+ card->stats.rx_frame_errors +
+ card->stats.rx_fifo_errors;
+ stats->rx_dropped = card->stats.rx_dropped_nomem +
+ card->stats.rx_dropped_notsupp;
stats->multicast = card->stats.rx_multicast;
+ stats->rx_length_errors = card->stats.rx_length_errors;
+ stats->rx_frame_errors = card->stats.rx_frame_errors;
+ stats->rx_fifo_errors = card->stats.rx_fifo_errors;
for (i = 0; i < card->qdio.no_out_queues; i++) {
queue = card->qdio.out_qs[i];
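
The qeth hunks above replace the single rx_errors/rx_dropped counters with cause-specific ones (length, frame, FIFO; no-memory, unsupported format) and sum them when filling rtnl_link_stats64, so the ethtool statistics stay fine-grained while the generic netdev counters remain accurate. A trimmed, standalone sketch of that aggregation; the struct layouts are reduced for illustration:

#include <stdio.h>

struct card_stats {
	unsigned long long rx_dropped_nomem, rx_dropped_notsupp;
	unsigned long long rx_length_errors, rx_frame_errors,
			   rx_fifo_errors;
};

struct link_stats {
	unsigned long long rx_errors, rx_dropped;
};

/* Derive the coarse rtnl_link_stats64-style fields by summation,
 * mirroring the qeth_get_stats64() hunk above. */
static void fill_link_stats(const struct card_stats *c, struct link_stats *s)
{
	s->rx_errors  = c->rx_length_errors + c->rx_frame_errors +
			c->rx_fifo_errors;
	s->rx_dropped = c->rx_dropped_nomem + c->rx_dropped_notsupp;
}

int main(void)
{
	struct card_stats c = { .rx_dropped_nomem = 2, .rx_fifo_errors = 1 };
	struct link_stats s;

	fill_link_stats(&c, &s);
	printf("rx_errors=%llu rx_dropped=%llu\n", s.rx_errors, s.rx_dropped);
	return 0;
}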
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 9ad0d6f9d48b..53fcf6641154 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -900,6 +900,7 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
#define IDX_ACTIVATE_SIZE 0x22
#define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b)
#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c)
+#define QETH_IDX_ACT_INVAL_FRAME 0x40
#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b] & 0x80)
#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer + 0x10)
#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer + 0x16)
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 9f392497d570..e81170ab6d9a 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -20,8 +20,6 @@ static ssize_t qeth_dev_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
switch (card->state) {
case CARD_STATE_DOWN:
@@ -45,8 +43,6 @@ static ssize_t qeth_dev_chpid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
return sprintf(buf, "%02X\n", card->info.chpid);
}
@@ -57,8 +53,7 @@ static ssize_t qeth_dev_if_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
+
return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
}
@@ -68,8 +63,6 @@ static ssize_t qeth_dev_card_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
}
@@ -94,8 +87,6 @@ static ssize_t qeth_dev_inbuf_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
return sprintf(buf, "%s\n", qeth_get_bufsize_str(card));
}
@@ -106,8 +97,6 @@ static ssize_t qeth_dev_portno_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
return sprintf(buf, "%i\n", card->dev->dev_port);
}
@@ -120,9 +109,6 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
unsigned int portno, limit;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -171,9 +157,6 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_PREC:
return sprintf(buf, "%s\n", "by precedence");
@@ -195,9 +178,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;
- if (!card)
- return -EINVAL;
-
if (IS_IQD(card))
return -EOPNOTSUPP;
@@ -262,9 +242,6 @@ static ssize_t qeth_dev_bufcnt_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
}
@@ -276,9 +253,6 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
int cnt, old_cnt;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -307,9 +281,6 @@ static ssize_t qeth_dev_recover_store(struct device *dev,
char *tmp;
int i;
- if (!card)
- return -EINVAL;
-
if (!qeth_card_hw_is_reachable(card))
return -EPERM;
@@ -325,11 +296,6 @@ static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
static ssize_t qeth_dev_performance_stats_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "1\n");
}
@@ -342,9 +308,6 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev,
bool reset;
int rc;
- if (!card)
- return -EINVAL;
-
rc = kstrtobool(buf, &reset);
if (rc)
return rc;
@@ -370,9 +333,6 @@ static ssize_t qeth_dev_layer2_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->options.layer);
}
@@ -385,9 +345,6 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
int i, rc = 0;
enum qeth_discipline_id newdis;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->discipline_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -453,9 +410,6 @@ static ssize_t qeth_dev_isolation_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
switch (card->options.isolation) {
case ISOLATION_MODE_NONE:
return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
@@ -475,9 +429,6 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
enum qeth_ipa_isolation_modes isolation;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (!IS_OSD(card) && !IS_OSX(card)) {
rc = -EOPNOTSUPP;
@@ -522,9 +473,6 @@ static ssize_t qeth_dev_switch_attrs_show(struct device *dev,
struct qeth_switch_info sw_info;
int rc = 0;
- if (!card)
- return -EINVAL;
-
if (!qeth_card_hw_is_reachable(card))
return sprintf(buf, "n/a\n");
@@ -555,8 +503,6 @@ static ssize_t qeth_hw_trap_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
if (card->info.hwtrap)
return snprintf(buf, 5, "arm\n");
else
@@ -570,9 +516,6 @@ static ssize_t qeth_hw_trap_store(struct device *dev,
int rc = 0;
int state = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (qeth_card_hw_is_reachable(card))
state = 1;
@@ -607,24 +550,12 @@ static ssize_t qeth_hw_trap_store(struct device *dev,
static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show,
qeth_hw_trap_store);
-static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
-{
-
- if (!card)
- return -EINVAL;
-
- return sprintf(buf, "%i\n", value);
-}
-
static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
const char *buf, size_t count, int *value, int max_value)
{
char *tmp;
int i, rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -645,7 +576,7 @@ static ssize_t qeth_dev_blkt_total_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
+ return sprintf(buf, "%i\n", card->info.blkt.time_total);
}
static ssize_t qeth_dev_blkt_total_store(struct device *dev,
@@ -657,8 +588,6 @@ static ssize_t qeth_dev_blkt_total_store(struct device *dev,
&card->info.blkt.time_total, 5000);
}
-
-
static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
qeth_dev_blkt_total_store);
@@ -667,7 +596,7 @@ static ssize_t qeth_dev_blkt_inter_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
+ return sprintf(buf, "%i\n", card->info.blkt.inter_packet);
}
static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
@@ -687,8 +616,7 @@ static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return qeth_dev_blkt_show(buf, card,
- card->info.blkt.inter_packet_jumbo);
+ return sprintf(buf, "%i\n", card->info.blkt.inter_packet_jumbo);
}
static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 096698df3886..f7485c6dea25 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -49,6 +49,8 @@ static const struct qeth_stats card_stats[] = {
QETH_CARD_STAT("rx0 SG skbs", rx_sg_skbs),
QETH_CARD_STAT("rx0 SG page frags", rx_sg_frags),
QETH_CARD_STAT("rx0 SG page allocs", rx_sg_alloc_page),
+ QETH_CARD_STAT("rx0 dropped, no memory", rx_dropped_nomem),
+ QETH_CARD_STAT("rx0 dropped, bad format", rx_dropped_notsupp),
};
#define TXQ_STATS_LEN ARRAY_SIZE(txq_stats)
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8f3093d24b12..ae69c981650d 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -315,29 +315,19 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
*done = 1;
break;
}
- switch (hdr->hdr.l2.id) {
- case QETH_HEADER_TYPE_LAYER2:
+
+ if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
skb->protocol = eth_type_trans(skb, skb->dev);
qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
len = skb->len;
napi_gro_receive(&card->napi, skb);
- break;
- case QETH_HEADER_TYPE_OSN:
- if (IS_OSN(card)) {
- skb_push(skb, sizeof(struct qeth_hdr));
- skb_copy_to_linear_data(skb, hdr,
- sizeof(struct qeth_hdr));
- len = skb->len;
- card->osn_info.data_cb(skb);
- break;
- }
- /* Else, fall through */
- default:
- dev_kfree_skb_any(skb);
- QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
- continue;
+ } else {
+ skb_push(skb, sizeof(*hdr));
+ skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
+ len = skb->len;
+ card->osn_info.data_cb(skb);
}
+
work_done++;
budget--;
QETH_CARD_STAT_INC(card, rx_packets);
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index f2c3b127b1e4..d9af5fe040be 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -18,9 +18,6 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
int rc = 0;
char *word;
- if (!card)
- return -EINVAL;
-
if (qeth_l2_vnicc_is_in_use(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
@@ -79,8 +76,6 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
int rc = 0;
enum qeth_sbp_roles role;
- if (!card)
- return -EINVAL;
if (sysfs_streq(buf, "primary"))
role = QETH_SBP_ROLE_PRIMARY;
else if (sysfs_streq(buf, "secondary"))
@@ -132,9 +127,6 @@ static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
int enabled;
- if (!card)
- return -EINVAL;
-
if (qeth_l2_vnicc_is_in_use(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
@@ -150,9 +142,6 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
bool enable;
int rc;
- if (!card)
- return -EINVAL;
-
rc = kstrtobool(buf, &enable);
if (rc)
return rc;
@@ -183,9 +172,6 @@ static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
char *state;
- if (!card)
- return -EINVAL;
-
if (qeth_l2_vnicc_is_in_use(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
@@ -207,9 +193,6 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
int enable, primary;
int rc = 0;
- if (!card)
- return -EINVAL;
-
if (sysfs_streq(buf, "none")) {
enable = 0;
primary = 0;
@@ -315,9 +298,6 @@ static ssize_t qeth_vnicc_timeout_show(struct device *dev,
u32 timeout;
int rc;
- if (!card)
- return -EINVAL;
-
rc = qeth_l2_vnicc_get_timeout(card, &timeout);
if (rc == -EBUSY)
return sprintf(buf, "n/a (BridgePort)\n");
@@ -335,9 +315,6 @@ static ssize_t qeth_vnicc_timeout_store(struct device *dev,
u32 timeout;
int rc;
- if (!card)
- return -EINVAL;
-
rc = kstrtou32(buf, 10, &timeout);
if (rc)
return rc;
@@ -357,9 +334,6 @@ static ssize_t qeth_vnicc_char_show(struct device *dev,
u32 vnicc;
int rc;
- if (!card)
- return -EINVAL;
-
vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name);
rc = qeth_l2_vnicc_get_state(card, vnicc, &state);
@@ -380,9 +354,6 @@ static ssize_t qeth_vnicc_char_store(struct device *dev,
u32 vnicc;
int rc;
- if (!card)
- return -EINVAL;
-
if (kstrtobool(buf, &state))
return -EINVAL;
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index ba913d1ab88d..2421f29021c1 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -54,6 +54,7 @@ static inline void qeth_l3_init_ipaddr(struct qeth_ipaddr *addr,
addr->type = type;
addr->proto = proto;
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+ addr->ref_counter = 1;
}
static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
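Initialising ref_counter in qeth_l3_init_ipaddr means every address built from the template starts with one reference, which is what lets the hunks below replace the alloc-then-memcpy helper with kmemdup(). A sketch of the idiom, assuming kernel context ("tmpl" is a hypothetical stack template):

	struct qeth_ipaddr tmpl;
	struct qeth_ipaddr *copy;

	qeth_l3_init_ipaddr(&tmpl, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
	copy = kmemdup(&tmpl, sizeof(tmpl), GFP_KERNEL);	/* alloc + memcpy in one step */
	if (!copy)
		return -ENOMEM;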
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 70d4586dc779..e7ce73b9f016 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -39,7 +39,6 @@
static int qeth_l3_set_offline(struct ccwgroup_device *);
-static void qeth_l3_set_rx_mode(struct net_device *dev);
static int qeth_l3_register_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
static int qeth_l3_deregister_addr_entry(struct qeth_card *,
@@ -64,15 +63,6 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
qeth_l3_ipaddr6_to_string(addr, buf);
}
-static struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions prot)
-{
- struct qeth_ipaddr *addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
-
- if (addr)
- qeth_l3_init_ipaddr(addr, QETH_IP_TYPE_NORMAL, prot);
- return addr;
-}
-
static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
struct qeth_ipaddr *query)
{
@@ -217,13 +207,10 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
"Registering IP address %s failed\n", buf);
return -EADDRINUSE;
} else {
- addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
+ addr = kmemdup(tmp_addr, sizeof(*tmp_addr), GFP_KERNEL);
if (!addr)
return -ENOMEM;
- memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
- addr->ref_counter = 1;
-
if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
QETH_CARD_TEXT(card, 2, "tkovaddr");
addr->ipato = 1;
@@ -1114,171 +1101,83 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
}
-static void
-qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
+static int qeth_l3_add_mcast_rtnl(struct net_device *dev, int vid, void *arg)
{
+ struct qeth_card *card = arg;
+ struct inet6_dev *in6_dev;
+ struct in_device *in4_dev;
+ struct qeth_ipaddr *ipm;
+ struct qeth_ipaddr tmp;
struct ip_mc_list *im4;
- struct qeth_ipaddr *tmp, *ipm;
+ struct ifmcaddr6 *im6;
QETH_CARD_TEXT(card, 4, "addmc");
- tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
- if (!tmp)
- return;
+ if (!dev || !(dev->flags & IFF_UP))
+ goto out;
- for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
- im4 = rcu_dereference(im4->next_rcu)) {
- tmp->u.a4.addr = im4->multiaddr;
- tmp->is_multicast = 1;
+ in4_dev = __in_dev_get_rtnl(dev);
+ if (!in4_dev)
+ goto walk_ipv6;
- ipm = qeth_l3_find_addr_by_ip(card, tmp);
+ qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
+ tmp.disp_flag = QETH_DISP_ADDR_ADD;
+ tmp.is_multicast = 1;
+
+ for (im4 = rtnl_dereference(in4_dev->mc_list); im4 != NULL;
+ im4 = rtnl_dereference(im4->next_rcu)) {
+ tmp.u.a4.addr = im4->multiaddr;
+
+ ipm = qeth_l3_find_addr_by_ip(card, &tmp);
if (ipm) {
/* for mcast, by-IP match means full match */
ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
- } else {
- ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
- if (!ipm)
- continue;
-
- ipm->u.a4.addr = im4->multiaddr;
- ipm->is_multicast = 1;
- ipm->disp_flag = QETH_DISP_ADDR_ADD;
- hash_add(card->ip_mc_htable,
- &ipm->hnode, qeth_l3_ipaddr_hash(ipm));
+ continue;
}
- }
-
- kfree(tmp);
-}
-
-/* called with rcu_read_lock */
-static void qeth_l3_add_vlan_mc(struct qeth_card *card)
-{
- struct in_device *in_dev;
- u16 vid;
-
- QETH_CARD_TEXT(card, 4, "addmcvl");
- if (!qeth_is_supported(card, IPA_FULL_VLAN))
- return;
-
- for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
- struct net_device *netdev;
-
- netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
- vid);
- if (netdev == NULL ||
- !(netdev->flags & IFF_UP))
- continue;
- in_dev = __in_dev_get_rcu(netdev);
- if (!in_dev)
+ ipm = kmemdup(&tmp, sizeof(tmp), GFP_KERNEL);
+ if (!ipm)
continue;
- qeth_l3_add_mc_to_hash(card, in_dev);
- }
-}
-
-static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
-{
- struct in_device *in4_dev;
-
- QETH_CARD_TEXT(card, 4, "chkmcv4");
- rcu_read_lock();
- in4_dev = __in_dev_get_rcu(card->dev);
- if (in4_dev == NULL)
- goto unlock;
- qeth_l3_add_mc_to_hash(card, in4_dev);
- qeth_l3_add_vlan_mc(card);
-unlock:
- rcu_read_unlock();
-}
+ hash_add(card->ip_mc_htable, &ipm->hnode,
+ qeth_l3_ipaddr_hash(ipm));
+ }
-static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
- struct inet6_dev *in6_dev)
-{
- struct qeth_ipaddr *ipm;
- struct ifmcaddr6 *im6;
- struct qeth_ipaddr *tmp;
+walk_ipv6:
+ if (!qeth_is_supported(card, IPA_IPV6))
+ goto out;
- QETH_CARD_TEXT(card, 4, "addmc6");
+ in6_dev = __in6_dev_get(dev);
+ if (!in6_dev)
+ goto out;
- tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
- if (!tmp)
- return;
+ qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
+ tmp.disp_flag = QETH_DISP_ADDR_ADD;
+ tmp.is_multicast = 1;
+ read_lock_bh(&in6_dev->lock);
for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
- tmp->u.a6.addr = im6->mca_addr;
- tmp->is_multicast = 1;
+ tmp.u.a6.addr = im6->mca_addr;
- ipm = qeth_l3_find_addr_by_ip(card, tmp);
+ ipm = qeth_l3_find_addr_by_ip(card, &tmp);
if (ipm) {
/* for mcast, by-IP match means full match */
ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
continue;
}
- ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+ ipm = kmemdup(&tmp, sizeof(tmp), GFP_ATOMIC);
if (!ipm)
continue;
- ipm->u.a6.addr = im6->mca_addr;
- ipm->is_multicast = 1;
- ipm->disp_flag = QETH_DISP_ADDR_ADD;
hash_add(card->ip_mc_htable,
&ipm->hnode, qeth_l3_ipaddr_hash(ipm));
}
- kfree(tmp);
-}
-
-/* called with rcu_read_lock */
-static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
-{
- struct inet6_dev *in_dev;
- u16 vid;
-
- QETH_CARD_TEXT(card, 4, "admc6vl");
-
- if (!qeth_is_supported(card, IPA_FULL_VLAN))
- return;
-
- for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
- struct net_device *netdev;
-
- netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
- vid);
- if (netdev == NULL ||
- !(netdev->flags & IFF_UP))
- continue;
- in_dev = in6_dev_get(netdev);
- if (!in_dev)
- continue;
- read_lock_bh(&in_dev->lock);
- qeth_l3_add_mc6_to_hash(card, in_dev);
- read_unlock_bh(&in_dev->lock);
- in6_dev_put(in_dev);
- }
-}
-
-static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
-{
- struct inet6_dev *in6_dev;
-
- QETH_CARD_TEXT(card, 4, "chkmcv6");
-
- if (!qeth_is_supported(card, IPA_IPV6))
- return ;
- in6_dev = in6_dev_get(card->dev);
- if (!in6_dev)
- return;
-
- rcu_read_lock();
- read_lock_bh(&in6_dev->lock);
- qeth_l3_add_mc6_to_hash(card, in6_dev);
- qeth_l3_add_vlan_mc6(card);
read_unlock_bh(&in6_dev->lock);
- rcu_read_unlock();
- in6_dev_put(in6_dev);
+
+out:
+ return 0;
}
static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
@@ -1286,7 +1185,7 @@ static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
{
struct qeth_card *card = dev->ml_priv;
- set_bit(vid, card->active_vlans);
+ QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
return 0;
}
@@ -1296,9 +1195,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
struct qeth_card *card = dev->ml_priv;
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
-
- clear_bit(vid, card->active_vlans);
- qeth_l3_set_rx_mode(dev);
return 0;
}
@@ -1366,7 +1262,6 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
int work_done = 0;
struct sk_buff *skb;
struct qeth_hdr *hdr;
- unsigned int len;
*done = 0;
WARN_ON_ONCE(!budget);
@@ -1378,25 +1273,17 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
*done = 1;
break;
}
- switch (hdr->hdr.l3.id) {
- case QETH_HEADER_TYPE_LAYER3:
+
+ if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
qeth_l3_rebuild_skb(card, skb, hdr);
- /* fall through */
- case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
- skb->protocol = eth_type_trans(skb, skb->dev);
- len = skb->len;
- napi_gro_receive(&card->napi, skb);
- break;
- default:
- dev_kfree_skb_any(skb);
- QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
- continue;
- }
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ QETH_CARD_STAT_INC(card, rx_packets);
+ QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
+
+ napi_gro_receive(&card->napi, skb);
work_done++;
budget--;
- QETH_CARD_STAT_INC(card, rx_packets);
- QETH_CARD_STAT_ADD(card, rx_bytes, len);
}
return work_done;
}
@@ -1462,8 +1349,11 @@ static void qeth_l3_rx_mode_work(struct work_struct *work)
QETH_CARD_TEXT(card, 3, "setmulti");
if (!card->options.sniffer) {
- qeth_l3_add_multicast_ipv4(card);
- qeth_l3_add_multicast_ipv6(card);
+ rtnl_lock();
+ qeth_l3_add_mcast_rtnl(card->dev, 0, card);
+ if (qeth_is_supported(card, IPA_FULL_VLAN))
+ vlan_for_each(card->dev, qeth_l3_add_mcast_rtnl, card);
+ rtnl_unlock();
hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
switch (addr->disp_flag) {
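The rx-mode rework above replaces the RCU walk over a driver-private active_vlans bitmap with vlan_for_each(), which visits every 8021q upper of the device and must run under RTNL. A condensed sketch of the traversal, assuming kernel context:

	static int demo_walk(struct net_device *dev, int vid, void *arg)
	{
		/* runs with RTNL held; inspect dev's multicast lists here */
		return 0;
	}

		rtnl_lock();
		demo_walk(card->dev, 0, card);			/* the base device */
		vlan_for_each(card->dev, demo_walk, card);	/* each VLAN upper */
		rtnl_unlock();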
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 2f73b33c9347..b6b38b69a5f4 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -60,9 +60,6 @@ static ssize_t qeth_l3_dev_route4_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_route_show(card, &card->options.route4, buf);
}
@@ -109,9 +106,6 @@ static ssize_t qeth_l3_dev_route4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_route_store(card, &card->options.route4,
QETH_PROT_IPV4, buf, count);
}
@@ -124,9 +118,6 @@ static ssize_t qeth_l3_dev_route6_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_route_show(card, &card->options.route6, buf);
}
@@ -135,9 +126,6 @@ static ssize_t qeth_l3_dev_route6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_route_store(card, &card->options.route6,
QETH_PROT_IPV6, buf, count);
}
@@ -150,9 +138,6 @@ static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
}
@@ -163,9 +148,6 @@ static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
char *tmp;
int i, rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -190,9 +172,6 @@ static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
}
@@ -203,9 +182,6 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
int rc = 0;
unsigned long i;
- if (!card)
- return -EINVAL;
-
if (!IS_IQD(card))
return -EPERM;
if (card->options.cq == QETH_CQ_ENABLED)
@@ -248,16 +224,12 @@ out:
static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
qeth_l3_dev_sniffer_store);
-
static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
char tmp_hsuid[9];
- if (!card)
- return -EINVAL;
-
if (!IS_IQD(card))
return -EPERM;
@@ -273,9 +245,6 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
char *tmp;
int rc;
- if (!card)
- return -EINVAL;
-
if (!IS_IQD(card))
return -EPERM;
if (card->state != CARD_STATE_DOWN)
@@ -336,9 +305,6 @@ static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
}
@@ -349,9 +315,6 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
bool enable;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -385,9 +348,6 @@ static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
}
@@ -399,9 +359,6 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
bool invert;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (sysfs_streq(buf, "toggle")) {
invert = !card->ipato.invert4;
@@ -460,9 +417,6 @@ static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
}
@@ -528,9 +482,6 @@ static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -558,9 +509,6 @@ static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -572,9 +520,6 @@ static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
}
@@ -585,9 +530,6 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
bool invert;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (sysfs_streq(buf, "toggle")) {
invert = !card->ipato.invert6;
@@ -617,9 +559,6 @@ static ssize_t qeth_l3_dev_ipato_add6_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
}
@@ -628,9 +567,6 @@ static ssize_t qeth_l3_dev_ipato_add6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -643,9 +579,6 @@ static ssize_t qeth_l3_dev_ipato_del6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -679,9 +612,6 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
int i;
- if (!card)
- return -EINVAL;
-
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
mutex_lock(&card->ip_lock);
@@ -741,9 +671,6 @@ static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -771,9 +698,6 @@ static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -793,9 +717,6 @@ static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -808,9 +729,6 @@ static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -884,9 +802,6 @@ static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -914,9 +829,6 @@ static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -936,9 +848,6 @@ static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -951,9 +860,6 @@ static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index f4b879d25fe9..fc6e4546d738 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -851,9 +851,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!(vport->fc_flag & FC_PT2PT)) {
/* Check config parameter use-adisc or FCP-2 */
- if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
+ if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
- (ndlp->nlp_type & NLP_FCP_TARGET))) {
+ (ndlp->nlp_type & NLP_FCP_TARGET)))) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_ADISC;
spin_unlock_irq(shost->host_lock);
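The regrouped condition makes cfg_use_adisc gate both alternatives; before, an FCP-2 target matched the OR on its own and turned ADISC on even with the option disabled. A standalone illustration (not lpfc code) of how the parenthesisation changes the predicate:

	#include <stdio.h>

	int main(void)
	{
		int use_adisc = 0, rscn_mode = 0, fcp2_target = 1;

		int old = (use_adisc && rscn_mode) || fcp2_target;	/* old grouping */
		int new = use_adisc && (rscn_mode || fcp2_target);	/* fixed grouping */

		printf("old=%d new=%d\n", old, new);	/* prints old=1 new=0 */
		return 0;
	}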
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a0c6945b8139..614f78dddafe 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -7866,7 +7866,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
if (sli4_hba->hdwq) {
for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
- if (eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
+ if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
fpeq = eq;
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 30bafd9d21e9..7259bce85e0e 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -440,9 +440,6 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
valid = 0;
if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
valid = 1;
- else if (start == (ha->flt_region_boot * 4) ||
- start == (ha->flt_region_fw * 4))
- valid = 1;
else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
valid = 1;
if (!valid) {
@@ -489,8 +486,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
"Writing flash region -- 0x%x/0x%x.\n",
ha->optrom_region_start, ha->optrom_region_size);
- ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+ rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
+ if (rval)
+ rval = -EIO;
break;
default:
rval = -EINVAL;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 28d587a89ba6..99f0a1a08143 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -253,7 +253,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
srb_t *sp;
const char *type;
int req_sg_cnt, rsp_sg_cnt;
- int rval = (DRIVER_ERROR << 16);
+ int rval = (DID_ERROR << 16);
uint16_t nextlid = 0;
if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
@@ -432,7 +432,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
- int rval = (DRIVER_ERROR << 16);
+ int rval = (DID_ERROR << 16);
int req_sg_cnt, rsp_sg_cnt;
uint16_t loop_id;
struct fc_port *fcport;
@@ -1950,7 +1950,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
- int rval = (DRIVER_ERROR << 16);
+ int rval = (DID_ERROR << 16);
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
srb_t *sp;
int req_sg_cnt = 0, rsp_sg_cnt = 0;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 1cc6913f76c4..4a1f21c11758 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -702,6 +702,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mcp->mb[2] = LSW(risc_addr);
mcp->mb[3] = 0;
mcp->mb[4] = 0;
+ mcp->mb[11] = 0;
ha->flags.using_lr_setting = 0;
if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
@@ -746,7 +747,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
if (ha->flags.exchoffld_enabled)
mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
- mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
+ mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
} else {
mcp->mb[1] = LSW(risc_addr);
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 6afad68e5ba2..238240984bc1 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -76,9 +76,11 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
* ensures no active vp_list traversal while the vport is removed
* from the queue)
*/
- for (i = 0; i < 10 && atomic_read(&vha->vref_count); i++)
- wait_event_timeout(vha->vref_waitq,
- atomic_read(&vha->vref_count), HZ);
+ for (i = 0; i < 10; i++) {
+ if (wait_event_timeout(vha->vref_waitq,
+ !atomic_read(&vha->vref_count), HZ) > 0)
+ break;
+ }
spin_lock_irqsave(&ha->vport_slock, flags);
if (atomic_read(&vha->vref_count)) {
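wait_event_timeout() returns 0 on timeout and a positive remaining-jiffies count once the condition holds, so the rewritten loop (and its twin in qla_os.c below) retries for up to ten seconds and stops as soon as the references drain; the old loop also passed the condition with the wrong polarity. The corrected idiom as a kernel-context sketch, with "wq" and "refcount" as hypothetical names:

	for (i = 0; i < 10; i++) {
		if (wait_event_timeout(wq, !atomic_read(&refcount), HZ) > 0)
			break;	/* condition became true, stop retrying */
	}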
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index bcb1e8598888..726ad4cbf4a6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1119,9 +1119,11 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
qla2x00_mark_all_devices_lost(vha, 0);
- for (i = 0; i < 10; i++)
- wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha),
- HZ);
+ for (i = 0; i < 10; i++) {
+ if (wait_event_timeout(vha->fcport_waitQ,
+ test_fcport_count(vha), HZ) > 0)
+ break;
+ }
flush_workqueue(vha->hw->wq);
}
@@ -3535,6 +3537,10 @@ qla2x00_shutdown(struct pci_dev *pdev)
qla2x00_try_to_stop_firmware(vha);
}
+ /* Disable timer */
+ if (vha->timer_active)
+ qla2x00_stop_timer(vha);
+
/* Turn adapter off line */
vha->flags.online = 0;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 5447738906ac..91c007d26c1e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1883,7 +1883,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
unsigned int cmd_size, sgl_size;
- sgl_size = scsi_mq_inline_sgl_size(shost);
+ sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
+ scsi_mq_inline_sgl_size(shost));
cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
if (scsi_host_get_prot(shost))
cmd_size += sizeof(struct scsi_data_buffer) +
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 03163ac5fe95..ebb40160539f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1166,11 +1166,12 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
sector_t threshold;
unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
- bool dif, dix;
unsigned int mask = logical_to_sectors(sdp, 1) - 1;
bool write = rq_data_dir(rq) == WRITE;
unsigned char protect, fua;
blk_status_t ret;
+ unsigned int dif;
+ bool dix;
ret = scsi_init_io(cmd);
if (ret != BLK_STS_OK)
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index de4019dc0f0b..1efc69e194f8 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -263,25 +263,16 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
int result = cmd->result;
struct request *rq = cmd->request;
- switch (req_op(rq)) {
- case REQ_OP_ZONE_RESET:
- case REQ_OP_ZONE_RESET_ALL:
-
- if (result &&
- sshdr->sense_key == ILLEGAL_REQUEST &&
- sshdr->asc == 0x24)
- /*
- * INVALID FIELD IN CDB error: reset of a conventional
- * zone was attempted. Nothing to worry about, so be
- * quiet about the error.
- */
- rq->rq_flags |= RQF_QUIET;
- break;
-
- case REQ_OP_WRITE:
- case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
- break;
+ if (req_op(rq) == REQ_OP_ZONE_RESET &&
+ result &&
+ sshdr->sense_key == ILLEGAL_REQUEST &&
+ sshdr->asc == 0x24) {
+ /*
+ * INVALID FIELD IN CDB error: reset of a conventional
+ * zone was attempted. Nothing to worry about, so be
+ * quiet about the error.
+ */
+ rq->rq_flags |= RQF_QUIET;
}
}
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index a9344eb4e047..dc2f6d2b46ed 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -98,6 +98,8 @@ static int ufs_bsg_request(struct bsg_job *job)
bsg_reply->reply_payload_rcv_len = 0;
+ pm_runtime_get_sync(hba->dev);
+
msgcode = bsg_request->msgcode;
switch (msgcode) {
case UPIU_TRANSACTION_QUERY_REQ:
@@ -135,6 +137,8 @@ static int ufs_bsg_request(struct bsg_job *job)
break;
}
+ pm_runtime_put_sync(hba->dev);
+
if (!desc_buff)
goto out;
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index d9231bd3c691..98b9d9a902ae 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -249,13 +249,13 @@ static struct genpd_power_state imx6_pm_domain_pu_state = {
};
static struct imx_pm_domain imx_gpc_domains[] = {
- [GPC_PGC_DOMAIN_ARM] {
+ [GPC_PGC_DOMAIN_ARM] = {
.base = {
.name = "ARM",
.flags = GENPD_FLAG_ALWAYS_ON,
},
},
- [GPC_PGC_DOMAIN_PU] {
+ [GPC_PGC_DOMAIN_PU] = {
.base = {
.name = "PU",
.power_off = imx6_pm_domain_power_off,
@@ -266,7 +266,7 @@ static struct imx_pm_domain imx_gpc_domains[] = {
.reg_offs = 0x260,
.cntr_pdn_bit = 0,
},
- [GPC_PGC_DOMAIN_DISPLAY] {
+ [GPC_PGC_DOMAIN_DISPLAY] = {
.base = {
.name = "DISPLAY",
.power_off = imx6_pm_domain_power_off,
@@ -275,7 +275,7 @@ static struct imx_pm_domain imx_gpc_domains[] = {
.reg_offs = 0x240,
.cntr_pdn_bit = 4,
},
- [GPC_PGC_DOMAIN_PCI] {
+ [GPC_PGC_DOMAIN_PCI] = {
.base = {
.name = "PCI",
.power_off = imx6_pm_domain_power_off,
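The "[GPC_PGC_DOMAIN_ARM] { ... }" form without the "=" relied on an obsolete GCC extension; C99 designated initialisers require the equals sign, as the hunks above now use. A standalone example of the corrected form:

	#include <stdio.h>

	enum { DOM_ARM, DOM_PU, DOM_MAX };

	static const char *dom_names[DOM_MAX] = {
		[DOM_ARM] = "ARM",	/* designator, '=', then the initialiser */
		[DOM_PU]  = "PU",
	};

	int main(void)
	{
		printf("%s %s\n", dom_names[DOM_ARM], dom_names[DOM_PU]);
		return 0;
	}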
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index f518273cfbe3..c8c80df090d1 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -5,6 +5,7 @@
menuconfig SOUNDWIRE
tristate "SoundWire support"
+ depends on ACPI || OF
help
SoundWire is a 2-Pin interface with data and clock line ratified
by the MIPI Alliance. SoundWire is used for transporting data
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index f1e38a293967..13c54eac0cc3 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -900,7 +900,7 @@ static int intel_register_dai(struct sdw_intel *sdw)
/* Create PCM DAIs */
stream = &cdns->pcm;
- ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, stream->num_in,
+ ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
off, stream->num_ch_in, true);
if (ret)
return ret;
@@ -931,7 +931,7 @@ static int intel_register_dai(struct sdw_intel *sdw)
if (ret)
return ret;
- off += cdns->pdm.num_bd;
+ off += cdns->pdm.num_out;
ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd,
off, stream->num_ch_bd, false);
if (ret)
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 48a63ca130d2..6473fa602f82 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -128,7 +128,8 @@ int sdw_of_find_slaves(struct sdw_bus *bus)
struct device_node *node;
for_each_child_of_node(bus->dev->of_node, node) {
- int link_id, sdw_version, ret, len;
+ int link_id, ret, len;
+ unsigned int sdw_version;
const char *compat = NULL;
struct sdw_slave_id id;
const __be32 *addr;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index c70caf4ea490..a2b5c796bbc4 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -1831,7 +1831,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
while (credits) {
struct sk_buff *p = cxgbit_sock_peek_wr(csk);
- const u32 csum = (__force u32)p->csum;
+ u32 csum;
if (unlikely(!p)) {
pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
@@ -1840,6 +1840,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
break;
}
+ csum = (__force u32)p->csum;
if (unlikely(credits < csum)) {
pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
csk, csk->tid,
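The cxgbit fix moves the p->csum load below the "!p" test: the old code dereferenced the peeked skb before checking whether the queue was empty. A standalone illustration of the reordered pattern:

	#include <stdio.h>
	#include <stddef.h>

	struct pkt { unsigned int csum; };

	static unsigned int peek_csum(const struct pkt *p)
	{
		if (!p)			/* check first ... */
			return 0;
		return p->csum;		/* ... dereference second */
	}

	int main(void)
	{
		struct pkt pk = { .csum = 42 };

		printf("%u %u\n", peek_csum(&pk), peek_csum(NULL));
		return 0;
	}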
diff --git a/drivers/thunderbolt/nhi_ops.c b/drivers/thunderbolt/nhi_ops.c
index 61cd09cef943..6795851aac95 100644
--- a/drivers/thunderbolt/nhi_ops.c
+++ b/drivers/thunderbolt/nhi_ops.c
@@ -80,7 +80,6 @@ static void icl_nhi_lc_mailbox_cmd(struct tb_nhi *nhi, enum icl_lc_mailbox_cmd c
{
u32 data;
- pci_read_config_dword(nhi->pdev, VS_CAP_19, &data);
data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK;
pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID);
}
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 410bf1bceeee..5ea8db667e83 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -896,12 +896,13 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
*/
bool tb_dp_port_is_enabled(struct tb_port *port)
{
- u32 data;
+ u32 data[2];
- if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
+ if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
+ ARRAY_SIZE(data)))
return false;
- return !!(data & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
+ return !!(data[0] & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
}
/**
@@ -914,19 +915,21 @@ bool tb_dp_port_is_enabled(struct tb_port *port)
*/
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
- u32 data;
+ u32 data[2];
int ret;
- ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1);
+ ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
+ ARRAY_SIZE(data));
if (ret)
return ret;
if (enable)
- data |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
+ data[0] |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
else
- data &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
+ data[0] &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
- return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap, 1);
+ return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
+ ARRAY_SIZE(data));
}
/* switch utility functions */
@@ -1031,13 +1034,6 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
if (sw->authorized)
goto unlock;
- /*
- * Make sure there is no PCIe rescan ongoing when a new PCIe
- * tunnel is created. Otherwise the PCIe rescan code might find
- * the new tunnel too early.
- */
- pci_lock_rescan_remove();
-
switch (val) {
/* Approve switch */
case 1:
@@ -1057,8 +1053,6 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
break;
}
- pci_unlock_rescan_remove();
-
if (!ret) {
sw->authorized = val;
/* Notify status change to the userspace */
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 9050b380ab83..4c1e75509303 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -2329,8 +2329,6 @@ static void cdns3_gadget_config(struct cdns3_device *priv_dev)
writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
cdns3_configure_dmult(priv_dev, NULL);
-
- cdns3_gadget_pullup(&priv_dev->gadget, 1);
}
/**
@@ -2345,9 +2343,35 @@ static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
{
struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
unsigned long flags;
+ enum usb_device_speed max_speed = driver->max_speed;
spin_lock_irqsave(&priv_dev->lock, flags);
priv_dev->gadget_driver = driver;
+
+ /* limit speed if necessary */
+ max_speed = min(driver->max_speed, gadget->max_speed);
+
+ switch (max_speed) {
+ case USB_SPEED_FULL:
+ writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
+ writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
+ break;
+ case USB_SPEED_HIGH:
+ writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
+ break;
+ case USB_SPEED_SUPER:
+ break;
+ default:
+ dev_err(priv_dev->dev,
+ "invalid maximum_speed parameter %d\n",
+ max_speed);
+ /* fall through */
+ case USB_SPEED_UNKNOWN:
+ /* default to superspeed */
+ max_speed = USB_SPEED_SUPER;
+ break;
+ }
+
cdns3_gadget_config(priv_dev);
spin_unlock_irqrestore(&priv_dev->lock, flags);
return 0;
@@ -2381,6 +2405,8 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
!(val & EP_CMD_EPRST), 1, 100);
+
+ priv_ep->flags &= ~EP_CLAIMED;
}
/* disable interrupt for device */
@@ -2575,12 +2601,7 @@ static int cdns3_gadget_start(struct cdns3 *cdns)
/* Check the maximum_speed parameter */
switch (max_speed) {
case USB_SPEED_FULL:
- writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
- writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
- break;
case USB_SPEED_HIGH:
- writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
- break;
case USB_SPEED_SUPER:
break;
default:
@@ -2713,8 +2734,6 @@ static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup)
/* disable interrupt for device */
writel(0, &priv_dev->regs->usb_ien);
- cdns3_gadget_pullup(&priv_dev->gadget, 0);
-
return 0;
}
diff --git a/drivers/usb/cdns3/host-export.h b/drivers/usb/cdns3/host-export.h
index b498a170b7e8..ae11810f8826 100644
--- a/drivers/usb/cdns3/host-export.h
+++ b/drivers/usb/cdns3/host-export.h
@@ -12,7 +12,6 @@
#ifdef CONFIG_USB_CDNS3_HOST
int cdns3_host_init(struct cdns3 *cdns);
-void cdns3_host_exit(struct cdns3 *cdns);
#else
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index 2733a8f71fcd..ad788bf3fe4f 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include "core.h"
#include "drd.h"
+#include "host-export.h"
static int __cdns3_host_init(struct cdns3 *cdns)
{
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 151a74a54386..1ac1095bfeac 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -348,6 +348,11 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
/* Validate the wMaxPacketSize field */
maxp = usb_endpoint_maxp(&endpoint->desc);
+ if (maxp == 0) {
+ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ goto skip_to_next_endpoint_or_interface_descriptor;
+ }
/* Find the highest legal maxpacket size for this endpoint */
i = 0; /* additional transactions per microframe */
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 89abc6078703..556a876c7896 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -102,6 +102,7 @@ config USB_DWC3_MESON_G12A
depends on ARCH_MESON || COMPILE_TEST
default USB_DWC3
select USB_ROLE_SWITCH
+ select REGMAP_MMIO
help
Support USB2/3 functionality in Amlogic G12A platforms.
Say 'Y' or 'M' if you have one such device.
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 999ce5e84d3c..97d6ae3c4df2 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -312,8 +312,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
dft = reg & DWC3_GFLADJ_30MHZ_MASK;
- if (!dev_WARN_ONCE(dwc->dev, dft == dwc->fladj,
- "request value same as default, ignoring\n")) {
+ if (dft != dwc->fladj) {
reg &= ~DWC3_GFLADJ_30MHZ_MASK;
reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 5e8e18222f92..023f0357efd7 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -258,7 +258,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
ret = platform_device_add_properties(dwc->dwc3, p);
if (ret < 0)
- return ret;
+ goto err;
ret = dwc3_pci_quirks(dwc);
if (ret)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 86dc1db788a9..a9aba716bf80 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -707,6 +707,12 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
}
+
+ while (!list_empty(&dep->cancelled_list)) {
+ req = next_request(&dep->cancelled_list);
+
+ dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ }
}
/**
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d516e8d6cd7f..5ec54b69c29c 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2170,14 +2170,18 @@ void composite_dev_cleanup(struct usb_composite_dev *cdev)
usb_ep_dequeue(cdev->gadget->ep0, cdev->os_desc_req);
kfree(cdev->os_desc_req->buf);
+ cdev->os_desc_req->buf = NULL;
usb_ep_free_request(cdev->gadget->ep0, cdev->os_desc_req);
+ cdev->os_desc_req = NULL;
}
if (cdev->req) {
if (cdev->setup_pending)
usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
kfree(cdev->req->buf);
+ cdev->req->buf = NULL;
usb_ep_free_request(cdev->gadget->ep0, cdev->req);
+ cdev->req = NULL;
}
cdev->next_string_id = 0;
device_remove_file(&cdev->gadget->dev, &dev_attr_suspended);
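Clearing the request pointers right after freeing them makes composite_dev_cleanup() idempotent: a second pass sees NULL and skips the free. The general shape, as a standalone example:

	#include <stdlib.h>

	struct ctx { char *buf; };

	static void ctx_cleanup(struct ctx *c)
	{
		free(c->buf);
		c->buf = NULL;	/* a repeated cleanup is now a harmless no-op */
	}

	int main(void)
	{
		struct ctx c = { .buf = malloc(16) };

		ctx_cleanup(&c);
		ctx_cleanup(&c);	/* safe: free(NULL) does nothing */
		return 0;
	}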
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 025129942894..33852c2b29d1 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -61,6 +61,8 @@ struct gadget_info {
bool use_os_desc;
char b_vendor_code;
char qw_sign[OS_STRING_QW_SIGN_LEN];
+ spinlock_t spinlock;
+ bool unbind;
};
static inline struct gadget_info *to_gadget_info(struct config_item *item)
@@ -1244,6 +1246,7 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
int ret;
/* the gi->lock is hold by the caller */
+ gi->unbind = 0;
cdev->gadget = gadget;
set_gadget_data(gadget, cdev);
ret = composite_dev_prepare(composite, cdev);
@@ -1376,31 +1379,128 @@ static void configfs_composite_unbind(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev;
struct gadget_info *gi;
+ unsigned long flags;
/* the gi->lock is hold by the caller */
cdev = get_gadget_data(gadget);
gi = container_of(cdev, struct gadget_info, cdev);
+ spin_lock_irqsave(&gi->spinlock, flags);
+ gi->unbind = 1;
+ spin_unlock_irqrestore(&gi->spinlock, flags);
kfree(otg_desc[0]);
otg_desc[0] = NULL;
purge_configs_funcs(gi);
composite_dev_cleanup(cdev);
usb_ep_autoconfig_reset(cdev->gadget);
+ spin_lock_irqsave(&gi->spinlock, flags);
cdev->gadget = NULL;
set_gadget_data(gadget, NULL);
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+
+static int configfs_composite_setup(struct usb_gadget *gadget,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev;
+ struct gadget_info *gi;
+ unsigned long flags;
+ int ret;
+
+ cdev = get_gadget_data(gadget);
+ if (!cdev)
+ return 0;
+
+ gi = container_of(cdev, struct gadget_info, cdev);
+ spin_lock_irqsave(&gi->spinlock, flags);
+ cdev = get_gadget_data(gadget);
+ if (!cdev || gi->unbind) {
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+ return 0;
+ }
+
+ ret = composite_setup(gadget, ctrl);
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+ return ret;
+}
+
+static void configfs_composite_disconnect(struct usb_gadget *gadget)
+{
+ struct usb_composite_dev *cdev;
+ struct gadget_info *gi;
+ unsigned long flags;
+
+ cdev = get_gadget_data(gadget);
+ if (!cdev)
+ return;
+
+ gi = container_of(cdev, struct gadget_info, cdev);
+ spin_lock_irqsave(&gi->spinlock, flags);
+ cdev = get_gadget_data(gadget);
+ if (!cdev || gi->unbind) {
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+ return;
+ }
+
+ composite_disconnect(gadget);
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+
+static void configfs_composite_suspend(struct usb_gadget *gadget)
+{
+ struct usb_composite_dev *cdev;
+ struct gadget_info *gi;
+ unsigned long flags;
+
+ cdev = get_gadget_data(gadget);
+ if (!cdev)
+ return;
+
+ gi = container_of(cdev, struct gadget_info, cdev);
+ spin_lock_irqsave(&gi->spinlock, flags);
+ cdev = get_gadget_data(gadget);
+ if (!cdev || gi->unbind) {
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+ return;
+ }
+
+ composite_suspend(gadget);
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+
+static void configfs_composite_resume(struct usb_gadget *gadget)
+{
+ struct usb_composite_dev *cdev;
+ struct gadget_info *gi;
+ unsigned long flags;
+
+ cdev = get_gadget_data(gadget);
+ if (!cdev)
+ return;
+
+ gi = container_of(cdev, struct gadget_info, cdev);
+ spin_lock_irqsave(&gi->spinlock, flags);
+ cdev = get_gadget_data(gadget);
+ if (!cdev || gi->unbind) {
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+ return;
+ }
+
+ composite_resume(gadget);
+ spin_unlock_irqrestore(&gi->spinlock, flags);
}
static const struct usb_gadget_driver configfs_driver_template = {
.bind = configfs_composite_bind,
.unbind = configfs_composite_unbind,
- .setup = composite_setup,
- .reset = composite_disconnect,
- .disconnect = composite_disconnect,
+ .setup = configfs_composite_setup,
+ .reset = configfs_composite_disconnect,
+ .disconnect = configfs_composite_disconnect,

- .suspend = composite_suspend,
- .resume = composite_resume,
+ .suspend = configfs_composite_suspend,
+ .resume = configfs_composite_resume,
.max_speed = USB_SPEED_SUPER,
.driver = {
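All four wrappers above share one guard: fetch the cdev once unlocked for a cheap early bail-out, then re-fetch it under gi->spinlock and also test gi->unbind, since an unbind can race in between. A condensed sketch of that double-check:

	cdev = get_gadget_data(gadget);
	if (!cdev)
		return;

	gi = container_of(cdev, struct gadget_info, cdev);
	spin_lock_irqsave(&gi->spinlock, flags);
	cdev = get_gadget_data(gadget);		/* re-check under the lock */
	if (cdev && !gi->unbind)
		composite_disconnect(gadget);	/* or setup/suspend/resume */
	spin_unlock_irqrestore(&gi->spinlock, flags);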
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 86ffc8307864..1d0d8952a74b 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -449,9 +449,11 @@ static void submit_request(struct usba_ep *ep, struct usba_request *req)
next_fifo_transaction(ep, req);
if (req->last_transaction) {
usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
- usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
+ if (ep_is_control(ep))
+ usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
} else {
- usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
+ if (ep_is_control(ep))
+ usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
}
}
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 92af8dc98c3d..51fa614b4079 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -98,6 +98,17 @@ int usb_ep_enable(struct usb_ep *ep)
if (ep->enabled)
goto out;
+ /* UDC drivers can't handle endpoints with maxpacket size 0 */
+ if (usb_endpoint_maxp(ep->desc) == 0) {
+ /*
+ * We should log an error message here, but we can't call
+ * dev_err() because there's no way to find the gadget
+ * given only ep.
+ */
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = ep->ops->enable(ep, ep->desc);
if (ret)
goto out;
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 20141c3096f6..9a05863b2876 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -2576,7 +2576,7 @@ static int fsl_udc_remove(struct platform_device *pdev)
dma_pool_destroy(udc_controller->td_pool);
free_irq(udc_controller->irq, udc_controller);
iounmap(dr_regs);
- if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
+ if (res && (pdata->operating_mode == FSL_USB2_DR_DEVICE))
release_mem_region(res->start, resource_size(res));
/* free udc --wait for the release() finished */
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index e098f16c01cb..33703140233a 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -1544,10 +1544,10 @@ static void usb3_set_device_address(struct renesas_usb3 *usb3, u16 addr)
static bool usb3_std_req_set_address(struct renesas_usb3 *usb3,
struct usb_ctrlrequest *ctrl)
{
- if (ctrl->wValue >= 128)
+ if (le16_to_cpu(ctrl->wValue) >= 128)
return true; /* stall */
- usb3_set_device_address(usb3, ctrl->wValue);
+ usb3_set_device_address(usb3, le16_to_cpu(ctrl->wValue));
usb3_set_p0_con_for_no_data(usb3);
return false;
@@ -1582,6 +1582,7 @@ static bool usb3_std_req_get_status(struct renesas_usb3 *usb3,
struct renesas_usb3_ep *usb3_ep;
int num;
u16 status = 0;
+ __le16 tx_data;
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
@@ -1604,10 +1605,10 @@ static bool usb3_std_req_get_status(struct renesas_usb3 *usb3,
}
if (!stall) {
- status = cpu_to_le16(status);
+ tx_data = cpu_to_le16(status);
dev_dbg(usb3_to_dev(usb3), "get_status: req = %p\n",
usb_req_to_usb3_req(usb3->ep0_req));
- usb3_pipe0_internal_xfer(usb3, &status, sizeof(status),
+ usb3_pipe0_internal_xfer(usb3, &tx_data, sizeof(tx_data),
usb3_pipe0_get_status_completion);
}
@@ -1772,7 +1773,7 @@ static bool usb3_std_req_set_sel(struct renesas_usb3 *usb3,
static bool usb3_std_req_set_configuration(struct renesas_usb3 *usb3,
struct usb_ctrlrequest *ctrl)
{
- if (ctrl->wValue > 0)
+ if (le16_to_cpu(ctrl->wValue) > 0)
usb3_set_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON);
else
usb3_clear_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON);
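wValue, wIndex and wLength in struct usb_ctrlrequest are little-endian on the wire (__le16), so comparing or storing them raw is only correct on little-endian hosts; hence the le16_to_cpu()/cpu_to_le16() conversions here and in the renesas_usbhs and whiteheat hunks below. A portable standalone analogue:

	#include <stdio.h>
	#include <stdint.h>

	/* Portable analogue of le16_to_cpu(): assemble from explicit bytes. */
	static uint16_t le16_to_host(const uint8_t b[2])
	{
		return (uint16_t)(b[0] | ((uint16_t)b[1] << 8));
	}

	int main(void)
	{
		uint8_t wire[2] = { 0x80, 0x00 };	/* little-endian 0x0080 = 128 */

		printf("%u\n", le16_to_host(wire));	/* 128 on any host */
		return 0;
	}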
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 7ba6afc7ef23..76c3f29562d2 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -202,10 +202,10 @@ static void xhci_ring_dump_segment(struct seq_file *s,
trb = &seg->trbs[i];
dma = seg->dma + i * sizeof(*trb);
seq_printf(s, "%pad: %s\n", &dma,
- xhci_decode_trb(trb->generic.field[0],
- trb->generic.field[1],
- trb->generic.field[2],
- trb->generic.field[3]));
+ xhci_decode_trb(le32_to_cpu(trb->generic.field[0]),
+ le32_to_cpu(trb->generic.field[1]),
+ le32_to_cpu(trb->generic.field[2]),
+ le32_to_cpu(trb->generic.field[3])));
}
}
@@ -263,10 +263,10 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused)
xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
seq_printf(s, "%pad: %s\n", &dev->out_ctx->dma,
- xhci_decode_slot_context(slot_ctx->dev_info,
- slot_ctx->dev_info2,
- slot_ctx->tt_info,
- slot_ctx->dev_state));
+ xhci_decode_slot_context(le32_to_cpu(slot_ctx->dev_info),
+ le32_to_cpu(slot_ctx->dev_info2),
+ le32_to_cpu(slot_ctx->tt_info),
+ le32_to_cpu(slot_ctx->dev_state)));
return 0;
}
@@ -286,10 +286,10 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci);
dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params);
seq_printf(s, "%pad: %s\n", &dma,
- xhci_decode_ep_context(ep_ctx->ep_info,
- ep_ctx->ep_info2,
- ep_ctx->deq,
- ep_ctx->tx_info));
+ xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info),
+ le32_to_cpu(ep_ctx->ep_info2),
+ le64_to_cpu(ep_ctx->deq),
+ le32_to_cpu(ep_ctx->tx_info)));
}
return 0;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 85ceb43e3405..e7aab31fd9a5 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3330,6 +3330,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (xhci_urb_suitable_for_idt(urb)) {
memcpy(&send_addr, urb->transfer_buffer,
trb_buff_len);
+ le64_to_cpus(&send_addr);
field |= TRB_IDT;
}
}
@@ -3475,6 +3476,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (xhci_urb_suitable_for_idt(urb)) {
memcpy(&addr, urb->transfer_buffer,
urb->transfer_buffer_length);
+ le64_to_cpus(&addr);
field |= TRB_IDT;
} else {
addr = (u64) urb->transfer_dma;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 517ec3206f6e..6c17e3fe181a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3071,6 +3071,48 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
}
}
+static void xhci_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *host_ep)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *vdev;
+ struct xhci_virt_ep *ep;
+ struct usb_device *udev;
+ unsigned long flags;
+ unsigned int ep_index;
+
+ xhci = hcd_to_xhci(hcd);
+rescan:
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ udev = (struct usb_device *)host_ep->hcpriv;
+ if (!udev || !udev->slot_id)
+ goto done;
+
+ vdev = xhci->devs[udev->slot_id];
+ if (!vdev)
+ goto done;
+
+ ep_index = xhci_get_endpoint_index(&host_ep->desc);
+ ep = &vdev->eps[ep_index];
+ if (!ep)
+ goto done;
+
+ /* wait for hub_tt_work to finish clearing hub TT */
+ if (ep->ep_state & EP_CLEARING_TT) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ schedule_timeout_uninterruptible(1);
+ goto rescan;
+ }
+
+ if (ep->ep_state)
+ xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
+ ep->ep_state);
+done:
+ host_ep->hcpriv = NULL;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+}
+
/*
* Called after usb core issues a clear halt control message.
* The host side of the halt should already be cleared by a reset endpoint
@@ -5238,20 +5280,13 @@ static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
unsigned int ep_index;
unsigned long flags;
- /*
- * udev might be NULL if tt buffer is cleared during a failed device
- * enumeration due to a halted control endpoint. Usb core might
- * have allocated a new udev for the next enumeration attempt.
- */
-
xhci = hcd_to_xhci(hcd);
+
+ spin_lock_irqsave(&xhci->lock, flags);
udev = (struct usb_device *)ep->hcpriv;
- if (!udev)
- return;
slot_id = udev->slot_id;
ep_index = xhci_get_endpoint_index(&ep->desc);
- spin_lock_irqsave(&xhci->lock, flags);
xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -5288,6 +5323,7 @@ static const struct hc_driver xhci_hc_driver = {
.free_streams = xhci_free_streams,
.add_endpoint = xhci_add_endpoint,
.drop_endpoint = xhci_drop_endpoint,
+ .endpoint_disable = xhci_endpoint_disable,
.endpoint_reset = xhci_endpoint_reset,
.check_bandwidth = xhci_check_bandwidth,
.reset_bandwidth = xhci_reset_bandwidth,
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 15b5f06fb0b3..8f86b4ebca89 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -487,7 +487,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
}
bytes_to_read = min(count, *actual_buffer);
if (bytes_to_read < *actual_buffer)
- dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
+ dev_warn(&dev->intf->dev, "Read buffer overflow, %zu bytes dropped\n",
*actual_buffer-bytes_to_read);
/* copy one interrupt_in_buffer from ring_buffer into userspace */
@@ -495,11 +495,11 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
retval = -EFAULT;
goto unlock_exit;
}
- dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size;
-
retval = bytes_to_read;
spin_lock_irq(&dev->rbsl);
+ dev->ring_tail = (dev->ring_tail + 1) % ring_buffer_size;
+
if (dev->buffer_overflow) {
dev->buffer_overflow = 0;
spin_unlock_irq(&dev->rbsl);
@@ -562,8 +562,9 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
/* write the data into interrupt_out_buffer from userspace */
bytes_to_write = min(count, write_buffer_size*dev->interrupt_out_endpoint_size);
if (bytes_to_write < count)
- dev_warn(&dev->intf->dev, "Write buffer overflow, %zd bytes dropped\n", count-bytes_to_write);
- dev_dbg(&dev->intf->dev, "%s: count = %zd, bytes_to_write = %zd\n",
+ dev_warn(&dev->intf->dev, "Write buffer overflow, %zu bytes dropped\n",
+ count - bytes_to_write);
+ dev_dbg(&dev->intf->dev, "%s: count = %zu, bytes_to_write = %zu\n",
__func__, count, bytes_to_write);
if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
@@ -580,7 +581,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
1 << 8, 0,
dev->interrupt_out_buffer,
bytes_to_write,
- USB_CTRL_SET_TIMEOUT * HZ);
+ USB_CTRL_SET_TIMEOUT);
if (retval < 0)
dev_err(&dev->intf->dev,
"Couldn't submit HID_REQ_SET_REPORT %d\n",
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index c3d5c1206eec..9dd02160cca9 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include "mtu3.h"
+#include "mtu3_dr.h"
#include "mtu3_debug.h"
#include "mtu3_trace.h"
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 4c3de777ef6c..a3c30b609433 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -162,17 +162,17 @@ void usbhs_usbreq_get_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
req->bRequest = (val >> 8) & 0xFF;
req->bRequestType = (val >> 0) & 0xFF;
- req->wValue = usbhs_read(priv, USBVAL);
- req->wIndex = usbhs_read(priv, USBINDX);
- req->wLength = usbhs_read(priv, USBLENG);
+ req->wValue = cpu_to_le16(usbhs_read(priv, USBVAL));
+ req->wIndex = cpu_to_le16(usbhs_read(priv, USBINDX));
+ req->wLength = cpu_to_le16(usbhs_read(priv, USBLENG));
}
void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
{
usbhs_write(priv, USBREQ, (req->bRequest << 8) | req->bRequestType);
- usbhs_write(priv, USBVAL, req->wValue);
- usbhs_write(priv, USBINDX, req->wIndex);
- usbhs_write(priv, USBLENG, req->wLength);
+ usbhs_write(priv, USBVAL, le16_to_cpu(req->wValue));
+ usbhs_write(priv, USBINDX, le16_to_cpu(req->wIndex));
+ usbhs_write(priv, USBLENG, le16_to_cpu(req->wLength));
usbhs_bset(priv, DCPCTR, SUREQ, SUREQ);
}
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index e5ef56991dba..cd38d74b3223 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -265,7 +265,7 @@ static int usbhsg_recip_handler_std_set_device(struct usbhs_priv *priv,
case USB_DEVICE_TEST_MODE:
usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
udelay(100);
- usbhs_sys_set_test_mode(priv, le16_to_cpu(ctrl->wIndex >> 8));
+ usbhs_sys_set_test_mode(priv, le16_to_cpu(ctrl->wIndex) >> 8);
break;
default:
usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
@@ -315,7 +315,7 @@ static void __usbhsg_recip_send_status(struct usbhsg_gpriv *gpriv,
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
struct usb_request *req;
- unsigned short *buf;
+ __le16 *buf;
/* alloc new usb_request for recip */
req = usb_ep_alloc_request(&dcp->ep, GFP_ATOMIC);
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 79314d8c94a4..ca3bd58f2025 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -559,6 +559,10 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command,
command_port = port->serial->port[COMMAND_PORT];
command_info = usb_get_serial_port_data(command_port);
+
+ if (command_port->bulk_out_size < datasize + 1)
+ return -EIO;
+
mutex_lock(&command_info->mutex);
command_info->command_finished = false;
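
The added check closes a slab overflow: firm_send_command() writes one command byte plus datasize bytes of payload into the command port's bulk-out buffer, so any request larger than bulk_out_size - 1 must be rejected up front. The shape of the guard, sketched against how the driver fills the buffer:

    if (command_port->bulk_out_size < datasize + 1)
            return -EIO;                    /* command byte + payload won't fit */

    transfer_buffer = command_port->write_urb->transfer_buffer;
    transfer_buffer[0] = command;
    memcpy(&transfer_buffer[1], data, datasize);
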
@@ -632,6 +636,7 @@ static void firm_setup_port(struct tty_struct *tty)
struct device *dev = &port->dev;
struct whiteheat_port_settings port_settings;
unsigned int cflag = tty->termios.c_cflag;
+ speed_t baud;
port_settings.port = port->port_number + 1;
@@ -692,11 +697,13 @@ static void firm_setup_port(struct tty_struct *tty)
dev_dbg(dev, "%s - XON = %2x, XOFF = %2x\n", __func__, port_settings.xon, port_settings.xoff);
/* get the baud rate wanted */
- port_settings.baud = tty_get_baud_rate(tty);
- dev_dbg(dev, "%s - baud rate = %d\n", __func__, port_settings.baud);
+ baud = tty_get_baud_rate(tty);
+ port_settings.baud = cpu_to_le32(baud);
+ dev_dbg(dev, "%s - baud rate = %u\n", __func__, baud);
/* fixme: should set validated settings */
- tty_encode_baud_rate(tty, port_settings.baud, port_settings.baud);
+ tty_encode_baud_rate(tty, baud, baud);
+
/* handle any settings that aren't specified in the tty structure */
port_settings.lloop = 0;
diff --git a/drivers/usb/serial/whiteheat.h b/drivers/usb/serial/whiteheat.h
index 00398149cd8d..269e727a92f9 100644
--- a/drivers/usb/serial/whiteheat.h
+++ b/drivers/usb/serial/whiteheat.h
@@ -87,7 +87,7 @@ struct whiteheat_simple {
struct whiteheat_port_settings {
__u8 port; /* port number (1 to N) */
- __u32 baud; /* any value 7 - 460800, firmware calculates
+ __le32 baud; /* any value 7 - 460800, firmware calculates
best fit; arrives little endian */
__u8 bits; /* 5, 6, 7, or 8 */
__u8 stop; /* 1 or 2, default 1 (2 = 1.5 if bits = 5) */
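
Typing the field as __le32 lets sparse flag any assignment that skips the byte swap, which is exactly what the whiteheat.c hunk above now does explicitly. A minimal sketch of filling such a wire-format struct:

    struct whiteheat_port_settings ps;
    speed_t baud = tty_get_baud_rate(tty);  /* host-endian value */

    ps.baud = cpu_to_le32(baud);            /* firmware expects little endian */
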
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 6737fab94959..54a3c8195c96 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -68,7 +68,6 @@ static const char* host_info(struct Scsi_Host *host)
static int slave_alloc (struct scsi_device *sdev)
{
struct us_data *us = host_to_us(sdev->host);
- int maxp;
/*
* Set the INQUIRY transfer length to 36. We don't use any of
@@ -78,15 +77,6 @@ static int slave_alloc (struct scsi_device *sdev)
sdev->inquiry_len = 36;
/*
- * USB has unusual scatter-gather requirements: the length of each
- * scatterlist element except the last must be divisible by the
- * Bulk maxpacket value. Fortunately this value is always a
- * power of 2. Inform the block layer about this requirement.
- */
- maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0);
- blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
-
- /*
* Some host controllers may have alignment requirements.
* We'll play it safe by requiring 512-byte alignment always.
*/
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index bf80d6f81f58..34538253f12c 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -789,30 +789,10 @@ static int uas_slave_alloc(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
(struct uas_dev_info *)sdev->host->hostdata;
- int maxp;
sdev->hostdata = devinfo;
/*
- * We have two requirements here. We must satisfy the requirements
- * of the physical HC and the demands of the protocol, as we
- * definitely want no additional memory allocation in this path
- * ruling out using bounce buffers.
- *
- * For a transmission on USB to continue we must never send
- * a package that is smaller than maxpacket. Hence the length of each
- * scatterlist element except the last must be divisible by the
- * Bulk maxpacket value.
- * If the HC does not ensure that through SG,
- * the upper layer must do that. We must assume nothing
- * about the capabilities off the HC, so we use the most
- * pessimistic requirement.
- */
-
- maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0);
- blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
-
- /*
* The protocol has no requirements on alignment in the strict sense.
* Controllers may or may not have alignment restrictions.
* As this is not exported, we use an extremely conservative guess.
diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
index c3803785f6ef..0ae40a13a9fe 100644
--- a/drivers/usb/usbip/vhci_tx.c
+++ b/drivers/usb/usbip/vhci_tx.c
@@ -147,7 +147,10 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
}
kfree(iov);
+ /* This is only for the isochronous case */
kfree(iso_buffer);
+ iso_buffer = NULL;
+
usbip_dbg_vhci_tx("send txdata\n");
total_size += txsize;
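
In vhci_send_cmd_submit() the iso_buffer pointer lives across loop iterations but is only allocated for isochronous URBs, so without the reset a later non-iso pass would kfree() the same buffer again. The defensive pattern, sketched with a hypothetical loop condition:

    void *iso_buffer = NULL;

    while (have_pending_urbs()) {
            /* iso_buffer is allocated only for isochronous transfers */
            kfree(iso_buffer);              /* kfree(NULL) is a harmless no-op */
            iso_buffer = NULL;              /* avoid a double free next pass */
    }
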
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 9f57736fe15e..dde392b91bb3 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -384,6 +384,49 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
return val < vq->num;
}
+static struct virtio_transport vhost_transport = {
+ .transport = {
+ .module = THIS_MODULE,
+
+ .get_local_cid = vhost_transport_get_local_cid,
+
+ .init = virtio_transport_do_socket_init,
+ .destruct = virtio_transport_destruct,
+ .release = virtio_transport_release,
+ .connect = virtio_transport_connect,
+ .shutdown = virtio_transport_shutdown,
+ .cancel_pkt = vhost_transport_cancel_pkt,
+
+ .dgram_enqueue = virtio_transport_dgram_enqueue,
+ .dgram_dequeue = virtio_transport_dgram_dequeue,
+ .dgram_bind = virtio_transport_dgram_bind,
+ .dgram_allow = virtio_transport_dgram_allow,
+
+ .stream_enqueue = virtio_transport_stream_enqueue,
+ .stream_dequeue = virtio_transport_stream_dequeue,
+ .stream_has_data = virtio_transport_stream_has_data,
+ .stream_has_space = virtio_transport_stream_has_space,
+ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
+ .stream_is_active = virtio_transport_stream_is_active,
+ .stream_allow = virtio_transport_stream_allow,
+
+ .notify_poll_in = virtio_transport_notify_poll_in,
+ .notify_poll_out = virtio_transport_notify_poll_out,
+ .notify_recv_init = virtio_transport_notify_recv_init,
+ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
+ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
+ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+ .notify_send_init = virtio_transport_notify_send_init,
+ .notify_send_pre_block = virtio_transport_notify_send_pre_block,
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+ .notify_buffer_size = virtio_transport_notify_buffer_size,
+
+ },
+
+ .send_pkt = vhost_transport_send_pkt,
+};
+
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -438,7 +481,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
/* Only accept correctly addressed packets */
if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
- virtio_transport_recv_pkt(pkt);
+ virtio_transport_recv_pkt(&vhost_transport, pkt);
else
virtio_transport_free_pkt(pkt);
@@ -675,6 +718,12 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
if (guest_cid > U32_MAX)
return -EINVAL;
+ /* Refuse if CID is assigned to the guest->host transport (i.e. nested
+ * VM), to make the loopback work.
+ */
+ if (vsock_find_cid(guest_cid))
+ return -EADDRINUSE;
+
/* Refuse if CID is already in use */
mutex_lock(&vhost_vsock_mutex);
other = vhost_vsock_get(guest_cid);
@@ -786,57 +835,12 @@ static struct miscdevice vhost_vsock_misc = {
.fops = &vhost_vsock_fops,
};
-static struct virtio_transport vhost_transport = {
- .transport = {
- .get_local_cid = vhost_transport_get_local_cid,
-
- .init = virtio_transport_do_socket_init,
- .destruct = virtio_transport_destruct,
- .release = virtio_transport_release,
- .connect = virtio_transport_connect,
- .shutdown = virtio_transport_shutdown,
- .cancel_pkt = vhost_transport_cancel_pkt,
-
- .dgram_enqueue = virtio_transport_dgram_enqueue,
- .dgram_dequeue = virtio_transport_dgram_dequeue,
- .dgram_bind = virtio_transport_dgram_bind,
- .dgram_allow = virtio_transport_dgram_allow,
-
- .stream_enqueue = virtio_transport_stream_enqueue,
- .stream_dequeue = virtio_transport_stream_dequeue,
- .stream_has_data = virtio_transport_stream_has_data,
- .stream_has_space = virtio_transport_stream_has_space,
- .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
- .stream_is_active = virtio_transport_stream_is_active,
- .stream_allow = virtio_transport_stream_allow,
-
- .notify_poll_in = virtio_transport_notify_poll_in,
- .notify_poll_out = virtio_transport_notify_poll_out,
- .notify_recv_init = virtio_transport_notify_recv_init,
- .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
- .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
- .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
- .notify_send_init = virtio_transport_notify_send_init,
- .notify_send_pre_block = virtio_transport_notify_send_pre_block,
- .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
- .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
-
- .set_buffer_size = virtio_transport_set_buffer_size,
- .set_min_buffer_size = virtio_transport_set_min_buffer_size,
- .set_max_buffer_size = virtio_transport_set_max_buffer_size,
- .get_buffer_size = virtio_transport_get_buffer_size,
- .get_min_buffer_size = virtio_transport_get_min_buffer_size,
- .get_max_buffer_size = virtio_transport_get_max_buffer_size,
- },
-
- .send_pkt = vhost_transport_send_pkt,
-};
-
static int __init vhost_vsock_init(void)
{
int ret;
- ret = vsock_core_init(&vhost_transport.transport);
+ ret = vsock_core_register(&vhost_transport.transport,
+ VSOCK_TRANSPORT_F_H2G);
if (ret < 0)
return ret;
return misc_register(&vhost_vsock_misc);
@@ -845,7 +849,7 @@ static int __init vhost_vsock_init(void)
static void __exit vhost_vsock_exit(void)
{
misc_deregister(&vhost_vsock_misc);
- vsock_core_exit();
+ vsock_core_unregister(&vhost_transport.transport);
};
module_init(vhost_vsock_init);
diff --git a/drivers/video/fbdev/c2p_core.h b/drivers/video/fbdev/c2p_core.h
index e1035a865fb9..45a6d895a7d7 100644
--- a/drivers/video/fbdev/c2p_core.h
+++ b/drivers/video/fbdev/c2p_core.h
@@ -29,7 +29,7 @@ static inline void _transp(u32 d[], unsigned int i1, unsigned int i2,
extern void c2p_unsupported(void);
-static inline u32 get_mask(unsigned int n)
+static __always_inline u32 get_mask(unsigned int n)
{
switch (n) {
case 1:
@@ -57,7 +57,7 @@ static inline u32 get_mask(unsigned int n)
* Transpose operations on 8 32-bit words
*/
-static inline void transp8(u32 d[], unsigned int n, unsigned int m)
+static __always_inline void transp8(u32 d[], unsigned int n, unsigned int m)
{
u32 mask = get_mask(n);
@@ -99,7 +99,7 @@ static inline void transp8(u32 d[], unsigned int n, unsigned int m)
* Transpose operations on 4 32-bit words
*/
-static inline void transp4(u32 d[], unsigned int n, unsigned int m)
+static __always_inline void transp4(u32 d[], unsigned int n, unsigned int m)
{
u32 mask = get_mask(n);
@@ -126,7 +126,7 @@ static inline void transp4(u32 d[], unsigned int n, unsigned int m)
* Transpose operations on 4 32-bit words (reverse order)
*/
-static inline void transp4x(u32 d[], unsigned int n, unsigned int m)
+static __always_inline void transp4x(u32 d[], unsigned int n, unsigned int m)
{
u32 mask = get_mask(n);
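
These helpers rely on a link-time trick: get_mask() and the transpose helpers end in a call to c2p_unsupported(), which is declared but never defined, so the kernel only links if the compiler inlines every call with a constant argument and proves the fallback dead. Plain inline is merely a hint that newer compilers (or CC_OPTIMIZE_FOR_SIZE builds) may ignore; __always_inline restores the guarantee. The pattern in miniature:

    extern void c2p_unsupported(void);      /* intentionally never defined */

    static __always_inline u32 get_mask(unsigned int n)
    {
            switch (n) {
            case 1:
                    return 0x55555555;
            /* ... more constant cases ... */
            }
            c2p_unsupported();              /* link error unless unreachable */
            return 0;
    }
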
diff --git a/drivers/watchdog/bd70528_wdt.c b/drivers/watchdog/bd70528_wdt.c
index b0152fef4fc7..bc60e036627a 100644
--- a/drivers/watchdog/bd70528_wdt.c
+++ b/drivers/watchdog/bd70528_wdt.c
@@ -288,3 +288,4 @@ module_platform_driver(bd70528_wdt);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 watchdog driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd70528-wdt");
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 9393be584e72..808eeb4779e4 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/timer.h>
+#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/io.h>
@@ -473,6 +474,11 @@ static long cpwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
+static long cpwd_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return cpwd_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+
static ssize_t cpwd_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -497,7 +503,7 @@ static ssize_t cpwd_read(struct file *file, char __user *buffer,
static const struct file_operations cpwd_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = cpwd_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
+ .compat_ioctl = cpwd_compat_ioctl,
.open = cpwd_open,
.write = cpwd_write,
.read = cpwd_read,
diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
index 7ea5cf54e94a..8ed89f032ebf 100644
--- a/drivers/watchdog/imx_sc_wdt.c
+++ b/drivers/watchdog/imx_sc_wdt.c
@@ -99,8 +99,14 @@ static int imx_sc_wdt_set_pretimeout(struct watchdog_device *wdog,
{
struct arm_smccc_res res;
+ /*
+ * SCU firmware calculates pretimeout based on current time
+ * stamp instead of the watchdog timeout stamp, so we need to
+ * convert the pretimeout to the SCU firmware's timeout value.
+ */
arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_SET_PRETIME_WDOG,
- pretimeout * 1000, 0, 0, 0, 0, 0, &res);
+ (wdog->timeout - pretimeout) * 1000, 0, 0, 0,
+ 0, 0, &res);
if (res.a0)
return -EACCES;
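
The conversion reconciles two definitions: the watchdog core expresses pretimeout as "seconds of warning before the timeout fires", while the SCU firmware wants "milliseconds from now". A worked example of the arithmetic (values are illustrative):

    unsigned int timeout = 60;      /* seconds until the watchdog resets */
    unsigned int pretimeout = 10;   /* warning 10 s before the reset */

    /* SCU must fire (60 - 10) * 1000 = 50000 ms from now */
    u32 scu_ms = (timeout - pretimeout) * 1000;
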
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index d17c1a6ed723..5a9ca10fbcfa 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -89,8 +89,8 @@ static unsigned int meson_gxbb_wdt_get_timeleft(struct watchdog_device *wdt_dev)
reg = readl(data->reg_base + GXBB_WDT_TCNT_REG);
- return ((reg >> GXBB_WDT_TCNT_CNT_SHIFT) -
- (reg & GXBB_WDT_TCNT_SETUP_MASK)) / 1000;
+ return ((reg & GXBB_WDT_TCNT_SETUP_MASK) -
+ (reg >> GXBB_WDT_TCNT_CNT_SHIFT)) / 1000;
}
static const struct watchdog_ops meson_gxbb_wdt_ops = {
diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c
index 2d3652004e39..1213179f863c 100644
--- a/drivers/watchdog/pm8916_wdt.c
+++ b/drivers/watchdog/pm8916_wdt.c
@@ -163,9 +163,17 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
- if (devm_request_irq(dev, irq, pm8916_wdt_isr, 0, "pm8916_wdt",
- wdt))
- irq = 0;
+ err = devm_request_irq(dev, irq, pm8916_wdt_isr, 0,
+ "pm8916_wdt", wdt);
+ if (err)
+ return err;
+
+ wdt->wdev.info = &pm8916_wdt_pt_ident;
+ } else {
+ if (irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ wdt->wdev.info = &pm8916_wdt_ident;
}
/* Configure watchdog to hard-reset mode */
@@ -177,7 +185,6 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
return err;
}
- wdt->wdev.info = (irq > 0) ? &pm8916_wdt_pt_ident : &pm8916_wdt_ident,
wdt->wdev.ops = &pm8916_wdt_ops,
wdt->wdev.parent = dev;
wdt->wdev.min_timeout = PM8916_WDT_MIN_TIMEOUT;
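
The rework also fixes probe ordering: platform_get_irq() may return -EPROBE_DEFER when the interrupt controller has not probed yet, and the old code folded every failure into "run without an IRQ", silently losing the pre-timeout feature. The corrected shape, sketched with hypothetical names (my_isr, my_wdt):

    irq = platform_get_irq(pdev, 0);
    if (irq > 0) {
            err = devm_request_irq(dev, irq, my_isr, 0, "my_wdt", wdt);
            if (err)
                    return err;             /* a real request failure is fatal */
    } else if (irq == -EPROBE_DEFER) {
            return -EPROBE_DEFER;           /* retry once the irqchip appears */
    }
    /* any other error: carry on without the pre-timeout interrupt */
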
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index cc12772d0a4d..497f979018c2 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -803,7 +803,12 @@ success:
continue;
if (cookie->inodes[i]) {
- afs_vnode_commit_status(&fc, AFS_FS_I(cookie->inodes[i]),
+ struct afs_vnode *iv = AFS_FS_I(cookie->inodes[i]);
+
+ if (test_bit(AFS_VNODE_UNSET, &iv->flags))
+ continue;
+
+ afs_vnode_commit_status(&fc, iv,
scb->cb_break, NULL, scb);
continue;
}
diff --git a/fs/aio.c b/fs/aio.c
index 01e0fb9ae45a..0d9a559d488c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -2179,7 +2179,7 @@ SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
#ifdef CONFIG_COMPAT
struct __compat_aio_sigset {
- compat_sigset_t __user *sigmask;
+ compat_uptr_t sigmask;
compat_size_t sigsetsize;
};
@@ -2193,7 +2193,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
struct old_timespec32 __user *, timeout,
const struct __compat_aio_sigset __user *, usig)
{
- struct __compat_aio_sigset ksig = { NULL, };
+ struct __compat_aio_sigset ksig = { 0, };
struct timespec64 t;
bool interrupted;
int ret;
@@ -2204,7 +2204,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
return -EFAULT;
- ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize);
+ ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
if (ret)
return ret;
@@ -2228,7 +2228,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
struct __kernel_timespec __user *, timeout,
const struct __compat_aio_sigset __user *, usig)
{
- struct __compat_aio_sigset ksig = { NULL, };
+ struct __compat_aio_sigset ksig = { 0, };
struct timespec64 t;
bool interrupted;
int ret;
@@ -2239,7 +2239,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
return -EFAULT;
- ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize);
+ ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
if (ret)
return ret;
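
The aio change is a compat ABI fix: a compat_sigset_t __user * member is pointer-sized in the native kernel ABI (8 bytes on 64-bit), but 32-bit userspace laid out the struct with a 4-byte pointer, so copy_from_user() was reading sigsetsize from the wrong offset. Storing the pointer as compat_uptr_t keeps the kernel's view byte-identical to the 32-bit layout, and compat_ptr() widens it safely. The pattern, sketched with hypothetical names (my_compat_args, uarg, do_work):

    struct my_compat_args {
            compat_uptr_t ptr;      /* 32 bits, matching 32-bit userspace */
            compat_size_t len;
    };

    struct my_compat_args a = { 0, };

    if (copy_from_user(&a, uarg, sizeof(a)))
            return -EFAULT;
    return do_work(compat_ptr(a.ptr), a.len);  /* widen to a kernel pointer */
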
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index 2866fabf497f..91f5787dae7c 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -459,9 +459,10 @@ static struct dentry *autofs_expire_indirect(struct super_block *sb,
*/
how &= ~AUTOFS_EXP_LEAVES;
found = should_expire(expired, mnt, timeout, how);
- if (!found || found != expired)
- /* Something has changed, continue */
+ if (found != expired) { // something has changed, continue
+ dput(found);
goto next;
+ }
if (expired != dentry)
dput(dentry);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c3f386b7cc0b..015910079e73 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -474,6 +474,7 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
u64 start = async_chunk->start;
u64 end = async_chunk->end;
u64 actual_end;
+ u64 i_size;
int ret = 0;
struct page **pages = NULL;
unsigned long nr_pages;
@@ -488,7 +489,19 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
SZ_16K);
- actual_end = min_t(u64, i_size_read(inode), end + 1);
+ /*
+ * We need to save i_size before now because it could change in between
+ * us evaluating the size and assigning it. This is because we lock and
+ * unlock the page in truncate and fallocate, and then modify the i_size
+ * later on.
+ *
+ * The barriers are to emulate READ_ONCE, remove that once i_size_read
+ * does that for us.
+ */
+ barrier();
+ i_size = i_size_read(inode);
+ barrier();
+ actual_end = min_t(u64, i_size, end + 1);
again:
will_compress = 0;
nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
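
The two barrier() calls pin down a single read of i_size: without them the compiler may load inode->i_size more than once between the assignment and later uses, observing different values if truncate or fallocate race in. As the comment says, this hand-rolls READ_ONCE(); assuming a 64-bit plain load, the intent is equivalent to:

    u64 i_size = READ_ONCE(inode->i_size);
    u64 actual_end = min_t(u64, i_size, end + 1);
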
@@ -9731,6 +9744,18 @@ out_fail:
commit_transaction = true;
}
if (commit_transaction) {
+ /*
+ * We may have set commit_transaction when logging the new name
+ * in the destination root, in which case we left the source
+ * root context in the list of log contextes. So make sure we
+ * remove it to avoid invalid memory accesses, since the context
+ * was allocated in our stack frame.
+ */
+ if (sync_log_root) {
+ mutex_lock(&root->log_mutex);
+ list_del_init(&ctx_root.list);
+ mutex_unlock(&root->log_mutex);
+ }
ret = btrfs_commit_transaction(trans);
} else {
int ret2;
@@ -9744,6 +9769,9 @@ out_notrans:
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&fs_info->subvol_sem);
+ ASSERT(list_empty(&ctx_root.list));
+ ASSERT(list_empty(&ctx_dest.list));
+
return ret;
}
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 7c145a41decd..23272d9154f3 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4195,9 +4195,6 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
u64 transid;
int ret;
- btrfs_warn(root->fs_info,
- "START_SYNC ioctl is deprecated and will be removed in kernel 5.7");
-
trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT)
@@ -4225,9 +4222,6 @@ static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
{
u64 transid;
- btrfs_warn(fs_info,
- "WAIT_SYNC ioctl is deprecated and will be removed in kernel 5.7");
-
if (argp) {
if (copy_from_user(&transid, argp, sizeof(transid)))
return -EFAULT;
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 98dc092a905e..e8a4b0ebe97f 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -893,6 +893,15 @@ static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
while (ticket->bytes > 0 && ticket->error == 0) {
ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
if (ret) {
+ /*
+ * Delete us from the list. After we unlock the space
+ * info, we don't want the async reclaim job to reserve
+ * space for this ticket. If that would happen, then the
+ * ticket's task would not know that space was reserved
+ * despite getting an error, resulting in a space leak
+ * (bytes_may_use counter of our space_info).
+ */
+ list_del_init(&ticket->list);
ticket->error = -EINTR;
break;
}
@@ -945,12 +954,24 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
spin_lock(&space_info->lock);
ret = ticket->error;
if (ticket->bytes || ticket->error) {
+ /*
+ * Need to delete here for priority tickets. For regular tickets
+ * either the async reclaim job deletes the ticket from the list
+ * or we delete it ourselves at wait_reserve_ticket().
+ */
list_del_init(&ticket->list);
if (!ret)
ret = -ENOSPC;
}
spin_unlock(&space_info->lock);
ASSERT(list_empty(&ticket->list));
+ /*
+ * Check that we can't have an error set if the reservation succeeded,
+ * as that would confuse tasks and lead them to error out without
+ * releasing reserved space (if an error happens the expectation is that
+ * space wasn't reserved at all).
+ */
+ ASSERT(!(ticket->bytes == 0 && ticket->error));
return ret;
}
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 43e488f5d063..076d5b8014fb 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -686,9 +686,7 @@ static void dev_item_err(const struct extent_buffer *eb, int slot,
static int check_dev_item(struct extent_buffer *leaf,
struct btrfs_key *key, int slot)
{
- struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_dev_item *ditem;
- u64 max_devid = max(BTRFS_MAX_DEVS(fs_info), BTRFS_MAX_DEVS_SYS_CHUNK);
if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) {
dev_item_err(leaf, slot,
@@ -696,12 +694,6 @@ static int check_dev_item(struct extent_buffer *leaf,
key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
return -EUCLEAN;
}
- if (key->offset > max_devid) {
- dev_item_err(leaf, slot,
- "invalid devid: has=%llu expect=[0, %llu]",
- key->offset, max_devid);
- return -EUCLEAN;
- }
ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
if (btrfs_device_id(leaf, ditem) != key->offset) {
dev_item_err(leaf, slot,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index bdfe4493e43a..e04409f85063 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4967,6 +4967,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
max_stripe_size = SZ_32M;
max_chunk_size = 2 * max_stripe_size;
+ devs_max = min_t(int, devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
} else {
btrfs_err(info, "invalid chunk type 0x%llx requested",
type);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index d3b9c9d5c1bd..f5a38910a82b 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1058,6 +1058,11 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
+ /* remove from inode's cap rbtree, and clear auth cap */
+ rb_erase(&cap->ci_node, &ci->i_caps);
+ if (ci->i_auth_cap == cap)
+ ci->i_auth_cap = NULL;
+
/* remove from session list */
spin_lock(&session->s_cap_lock);
if (session->s_cap_iterator == cap) {
@@ -1091,11 +1096,6 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
spin_unlock(&session->s_cap_lock);
- /* remove from inode list */
- rb_erase(&cap->ci_node, &ci->i_caps);
- if (ci->i_auth_cap == cap)
- ci->i_auth_cap = NULL;
-
if (removed)
ceph_put_cap(mdsc, cap);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 4ca0b8ff9a72..d17a789fd856 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1553,36 +1553,37 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
int valid = 0;
struct dentry *parent;
- struct inode *dir;
+ struct inode *dir, *inode;
if (flags & LOOKUP_RCU) {
parent = READ_ONCE(dentry->d_parent);
dir = d_inode_rcu(parent);
if (!dir)
return -ECHILD;
+ inode = d_inode_rcu(dentry);
} else {
parent = dget_parent(dentry);
dir = d_inode(parent);
+ inode = d_inode(dentry);
}
dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
- dentry, d_inode(dentry), ceph_dentry(dentry)->offset);
+ dentry, inode, ceph_dentry(dentry)->offset);
/* always trust cached snapped dentries, snapdir dentry */
if (ceph_snap(dir) != CEPH_NOSNAP) {
dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
- dentry, d_inode(dentry));
+ dentry, inode);
valid = 1;
- } else if (d_really_is_positive(dentry) &&
- ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
+ } else if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
valid = 1;
} else {
valid = dentry_lease_is_valid(dentry, flags);
if (valid == -ECHILD)
return valid;
if (valid || dir_lease_is_valid(dir, dentry)) {
- if (d_really_is_positive(dentry))
- valid = ceph_is_any_caps(d_inode(dentry));
+ if (inode)
+ valid = ceph_is_any_caps(inode);
else
valid = 1;
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index d277f71abe0b..8de633964dc3 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -462,6 +462,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
err = ceph_security_init_secctx(dentry, mode, &as_ctx);
if (err < 0)
goto out_ctx;
+ } else if (!d_in_lookup(dentry)) {
+ /* If it's not being looked up, it's negative */
+ return -ENOENT;
}
/* do the open */
@@ -750,6 +753,9 @@ static void ceph_aio_complete(struct inode *inode,
if (!atomic_dec_and_test(&aio_req->pending_reqs))
return;
+ if (aio_req->iocb->ki_flags & IOCB_DIRECT)
+ inode_dio_end(inode);
+
ret = aio_req->error;
if (!ret)
ret = aio_req->total_len;
@@ -1088,6 +1094,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
CEPH_CAP_FILE_RD);
list_splice(&aio_req->osd_reqs, &osd_reqs);
+ inode_dio_begin(inode);
while (!list_empty(&osd_reqs)) {
req = list_first_entry(&osd_reqs,
struct ceph_osd_request,
@@ -1261,14 +1268,24 @@ again:
dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
+ if (iocb->ki_flags & IOCB_DIRECT)
+ ceph_start_io_direct(inode);
+ else
+ ceph_start_io_read(inode);
+
if (fi->fmode & CEPH_FILE_MODE_LAZY)
want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
else
want = CEPH_CAP_FILE_CACHE;
ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
&got, &pinned_page);
- if (ret < 0)
+ if (ret < 0) {
+ if (iocb->ki_flags & IOCB_DIRECT)
+ ceph_end_io_direct(inode);
+ else
+ ceph_end_io_read(inode);
return ret;
+ }
if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
(iocb->ki_flags & IOCB_DIRECT) ||
@@ -1280,16 +1297,12 @@ again:
if (ci->i_inline_version == CEPH_INLINE_NONE) {
if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
- ceph_start_io_direct(inode);
ret = ceph_direct_read_write(iocb, to,
NULL, NULL);
- ceph_end_io_direct(inode);
if (ret >= 0 && ret < len)
retry_op = CHECK_EOF;
} else {
- ceph_start_io_read(inode);
ret = ceph_sync_read(iocb, to, &retry_op);
- ceph_end_io_read(inode);
}
} else {
retry_op = READ_INLINE;
@@ -1300,11 +1313,10 @@ again:
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
ceph_cap_string(got));
ceph_add_rw_context(fi, &rw_ctx);
- ceph_start_io_read(inode);
ret = generic_file_read_iter(iocb, to);
- ceph_end_io_read(inode);
ceph_del_rw_context(fi, &rw_ctx);
}
+
dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
if (pinned_page) {
@@ -1312,6 +1324,12 @@ again:
pinned_page = NULL;
}
ceph_put_cap_refs(ci, got);
+
+ if (iocb->ki_flags & IOCB_DIRECT)
+ ceph_end_io_direct(inode);
+ else
+ ceph_end_io_read(inode);
+
if (retry_op > HAVE_RETRIED && ret >= 0) {
int statret;
struct page *page = NULL;
@@ -1956,10 +1974,18 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
return -EOPNOTSUPP;
+ /*
+ * Striped file layouts require that we copy partial objects, but the
+ * OSD copy-from operation only supports full-object copies. Limit
+ * this to non-striped file layouts for now.
+ */
if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
- (src_ci->i_layout.stripe_count != dst_ci->i_layout.stripe_count) ||
- (src_ci->i_layout.object_size != dst_ci->i_layout.object_size))
+ (src_ci->i_layout.stripe_count != 1) ||
+ (dst_ci->i_layout.stripe_count != 1) ||
+ (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
+ dout("Invalid src/dst files layout\n");
return -EOPNOTSUPP;
+ }
if (len < src_ci->i_layout.object_size)
return -EOPNOTSUPP; /* no remote copy will be done */
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 9f135624ae47..c07407586ce8 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1434,6 +1434,7 @@ retry_lookup:
dout(" final dn %p\n", dn);
} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
req->r_op == CEPH_MDS_OP_MKSNAP) &&
+ test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
struct inode *dir = req->r_parent;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index edfd643a8205..b47f43fc2d68 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -268,6 +268,7 @@ static int parse_fsopt_token(char *c, void *private)
}
break;
case Opt_fscache_uniq:
+#ifdef CONFIG_CEPH_FSCACHE
kfree(fsopt->fscache_uniq);
fsopt->fscache_uniq = kstrndup(argstr[0].from,
argstr[0].to-argstr[0].from,
@@ -276,7 +277,10 @@ static int parse_fsopt_token(char *c, void *private)
return -ENOMEM;
fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
break;
- /* misc */
+#else
+ pr_err("fscache support is disabled\n");
+ return -EINVAL;
+#endif
case Opt_wsize:
if (intval < (int)PAGE_SIZE || intval > CEPH_MAX_WRITE_SIZE)
return -EINVAL;
@@ -353,10 +357,15 @@ static int parse_fsopt_token(char *c, void *private)
fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
break;
case Opt_fscache:
+#ifdef CONFIG_CEPH_FSCACHE
fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
kfree(fsopt->fscache_uniq);
fsopt->fscache_uniq = NULL;
break;
+#else
+ pr_err("fscache support is disabled\n");
+ return -EINVAL;
+#endif
case Opt_nofscache:
fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
kfree(fsopt->fscache_uniq);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 4c0922596467..cd55af9b7cc5 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -4084,6 +4084,7 @@ free_pages:
kfree(dw->ppages);
cifs_small_buf_release(dw->buf);
+ kfree(dw);
}
@@ -4157,7 +4158,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
dw->server = server;
dw->ppages = pages;
dw->len = len;
- queue_work(cifsiod_wq, &dw->decrypt);
+ queue_work(decrypt_wq, &dw->decrypt);
*num_mids = 0; /* worker thread takes care of finding mid */
return -1;
}
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index ea735d59c36e..0abfde6d0b05 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -838,6 +838,7 @@ struct create_durable_handle_reconnect_v2 {
struct create_context ccontext;
__u8 Name[8];
struct durable_reconnect_context_v2 dcontext;
+ __u8 Pad[4];
} __packed;
/* See MS-SMB2 2.2.13.2.5 */
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index dc5dbf6a81d7..cb61467478ca 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -101,7 +101,7 @@ static int create_link(struct config_item *parent_item,
}
target_sd->s_links++;
spin_unlock(&configfs_dirent_lock);
- ret = configfs_get_target_path(item, item, body);
+ ret = configfs_get_target_path(parent_item, item, body);
if (!ret)
ret = configfs_create_link(target_sd, parent_item->ci_dentry,
dentry, body);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 18426f4855f1..e23752d9a79f 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -128,13 +128,20 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
struct inode *inode)
{
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
- struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
struct dentry *lower_dir_dentry;
+ struct inode *lower_dir_inode;
int rc;
- dget(lower_dentry);
- lower_dir_dentry = lock_parent(lower_dentry);
- rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
+ lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
+ lower_dir_inode = d_inode(lower_dir_dentry);
+ inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT);
+ dget(lower_dentry); // don't even try to make the lower negative
+ if (lower_dentry->d_parent != lower_dir_dentry)
+ rc = -EINVAL;
+ else if (d_unhashed(lower_dentry))
+ rc = -EINVAL;
+ else
+ rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
if (rc) {
printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
goto out_unlock;
@@ -142,10 +149,11 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
fsstack_copy_attr_times(dir, lower_dir_inode);
set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
inode->i_ctime = dir->i_ctime;
- d_drop(dentry);
out_unlock:
- unlock_dir(lower_dir_dentry);
dput(lower_dentry);
+ inode_unlock(lower_dir_inode);
+ if (!rc)
+ d_drop(dentry);
return rc;
}
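
The rewrite replaces the old lock_parent() helper with an explicit scheme used by unlink and rmdir alike: resolve the lower parent through the eCryptfs mapping (not through lower_dentry->d_parent, which can change under us), take the parent's lock with I_MUTEX_PARENT nesting, and only then verify that the victim still belongs to that parent and is still hashed. Its skeleton, with comments on what each check guards against:

    inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT);
    dget(lower_dentry);
    if (lower_dentry->d_parent != lower_dir_dentry)
            rc = -EINVAL;           /* lost a race with rename */
    else if (d_unhashed(lower_dentry))
            rc = -EINVAL;           /* lost a race with unlink */
    else
            rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
    dput(lower_dentry);
    inode_unlock(lower_dir_inode);
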
@@ -311,9 +319,9 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
struct dentry *lower_dentry)
{
- struct inode *inode, *lower_inode = d_inode(lower_dentry);
+ struct path *path = ecryptfs_dentry_to_lower_path(dentry->d_parent);
+ struct inode *inode, *lower_inode;
struct ecryptfs_dentry_info *dentry_info;
- struct vfsmount *lower_mnt;
int rc = 0;
dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
@@ -322,16 +330,23 @@ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
return ERR_PTR(-ENOMEM);
}
- lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
fsstack_copy_attr_atime(d_inode(dentry->d_parent),
- d_inode(lower_dentry->d_parent));
+ d_inode(path->dentry));
BUG_ON(!d_count(lower_dentry));
ecryptfs_set_dentry_private(dentry, dentry_info);
- dentry_info->lower_path.mnt = lower_mnt;
+ dentry_info->lower_path.mnt = mntget(path->mnt);
dentry_info->lower_path.dentry = lower_dentry;
- if (d_really_is_negative(lower_dentry)) {
+ /*
+ * negative dentry can go positive under us here - its parent is not
+ * locked. That's OK and that could happen just as we return from
+ * ecryptfs_lookup() anyway. Just need to be careful and fetch
+ * ->d_inode only once - it's not stable here.
+ */
+ lower_inode = READ_ONCE(lower_dentry->d_inode);
+
+ if (!lower_inode) {
/* We want to add because we couldn't find in lower */
d_add(dentry, NULL);
return NULL;
@@ -512,22 +527,30 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct dentry *lower_dentry;
struct dentry *lower_dir_dentry;
+ struct inode *lower_dir_inode;
int rc;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
- dget(dentry);
- lower_dir_dentry = lock_parent(lower_dentry);
- dget(lower_dentry);
- rc = vfs_rmdir(d_inode(lower_dir_dentry), lower_dentry);
- dput(lower_dentry);
- if (!rc && d_really_is_positive(dentry))
+ lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
+ lower_dir_inode = d_inode(lower_dir_dentry);
+
+ inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT);
+ dget(lower_dentry); // don't even try to make the lower negative
+ if (lower_dentry->d_parent != lower_dir_dentry)
+ rc = -EINVAL;
+ else if (d_unhashed(lower_dentry))
+ rc = -EINVAL;
+ else
+ rc = vfs_rmdir(lower_dir_inode, lower_dentry);
+ if (!rc) {
clear_nlink(d_inode(dentry));
- fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry));
- set_nlink(dir, d_inode(lower_dir_dentry)->i_nlink);
- unlock_dir(lower_dir_dentry);
+ fsstack_copy_attr_times(dir, lower_dir_inode);
+ set_nlink(dir, lower_dir_inode->i_nlink);
+ }
+ dput(lower_dentry);
+ inode_unlock(lower_dir_inode);
if (!rc)
d_drop(dentry);
- dput(dentry);
return rc;
}
@@ -565,20 +588,22 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *lower_new_dentry;
struct dentry *lower_old_dir_dentry;
struct dentry *lower_new_dir_dentry;
- struct dentry *trap = NULL;
+ struct dentry *trap;
struct inode *target_inode;
if (flags)
return -EINVAL;
+ lower_old_dir_dentry = ecryptfs_dentry_to_lower(old_dentry->d_parent);
+ lower_new_dir_dentry = ecryptfs_dentry_to_lower(new_dentry->d_parent);
+
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
- dget(lower_old_dentry);
- dget(lower_new_dentry);
- lower_old_dir_dentry = dget_parent(lower_old_dentry);
- lower_new_dir_dentry = dget_parent(lower_new_dentry);
+
target_inode = d_inode(new_dentry);
+
trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ dget(lower_new_dentry);
rc = -EINVAL;
if (lower_old_dentry->d_parent != lower_old_dir_dentry)
goto out_lock;
@@ -606,11 +631,8 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (new_dir != old_dir)
fsstack_copy_attr_all(old_dir, d_inode(lower_old_dir_dentry));
out_lock:
- unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
- dput(lower_new_dir_dentry);
- dput(lower_old_dir_dentry);
dput(lower_new_dentry);
- dput(lower_old_dentry);
+ unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
return rc;
}
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 09bc68708d28..2dd55b172d57 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -519,26 +519,33 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
* inode is actually connected to the parent.
*/
err = exportfs_get_name(mnt, target_dir, nbuf, result);
- if (!err) {
- inode_lock(target_dir->d_inode);
- nresult = lookup_one_len(nbuf, target_dir,
- strlen(nbuf));
- inode_unlock(target_dir->d_inode);
- if (!IS_ERR(nresult)) {
- if (nresult->d_inode) {
- dput(result);
- result = nresult;
- } else
- dput(nresult);
- }
+ if (err) {
+ dput(target_dir);
+ goto err_result;
}
+ inode_lock(target_dir->d_inode);
+ nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
+ if (!IS_ERR(nresult)) {
+ if (unlikely(nresult->d_inode != result->d_inode)) {
+ dput(nresult);
+ nresult = ERR_PTR(-ESTALE);
+ }
+ }
+ inode_unlock(target_dir->d_inode);
/*
* At this point we are done with the parent, but it's pinned
* by the child dentry anyway.
*/
dput(target_dir);
+ if (IS_ERR(nresult)) {
+ err = PTR_ERR(nresult);
+ goto err_result;
+ }
+ dput(result);
+ result = nresult;
+
/*
* And finally make sure the dentry is actually acceptable
* to NFSD.
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8461a6322039..335607b8c5c0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -576,10 +576,13 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
spin_unlock(&inode->i_lock);
/*
- * A dying wb indicates that the memcg-blkcg mapping has changed
- * and a new wb is already serving the memcg. Switch immediately.
+ * A dying wb indicates that either the blkcg associated with the
+ * memcg changed or the associated memcg is dying. In the first
+ * case, a replacement wb should already be available and we should
+ * refresh the wb immediately. In the second case, trying to
+ * refresh will keep failing.
*/
- if (unlikely(wb_dying(wbc->wb)))
+ if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
inode_switch_wbs(inode, wbc->wb_id);
}
EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f9a38998f2fc..2c819c3c855d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -326,6 +326,7 @@ struct io_kiocb {
#define REQ_F_TIMEOUT 1024 /* timeout request */
#define REQ_F_ISREG 2048 /* regular file */
#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
+#define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */
u64 user_data;
u32 result;
u32 sequence;
@@ -453,9 +454,13 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
struct io_kiocb *req;
req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
- if (req && !__io_sequence_defer(ctx, req)) {
- list_del_init(&req->list);
- return req;
+ if (req) {
+ if (req->flags & REQ_F_TIMEOUT_NOSEQ)
+ return NULL;
+ if (!__io_sequence_defer(ctx, req)) {
+ list_del_init(&req->list);
+ return req;
+ }
}
return NULL;
@@ -1225,7 +1230,7 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
}
}
- return 0;
+ return len;
}
static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
@@ -1941,18 +1946,24 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
return -EFAULT;
+ req->flags |= REQ_F_TIMEOUT;
+
/*
* sqe->off holds how many events that need to occur for this
- * timeout event to be satisfied.
+ * timeout event to be satisfied. If it isn't set, then this is
+ * a pure timeout request and the sequence isn't used.
*/
count = READ_ONCE(sqe->off);
- if (!count)
- count = 1;
+ if (!count) {
+ req->flags |= REQ_F_TIMEOUT_NOSEQ;
+ spin_lock_irq(&ctx->completion_lock);
+ entry = ctx->timeout_list.prev;
+ goto add;
+ }
req->sequence = ctx->cached_sq_head + count - 1;
/* reuse it to store the count */
req->submit.sequence = count;
- req->flags |= REQ_F_TIMEOUT;
/*
* Insertion sort, ensuring the first entry in the list is always
@@ -1964,6 +1975,9 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
unsigned nxt_sq_head;
long long tmp, tmp_nxt;
+ if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
+ continue;
+
/*
* Since cached_sq_head + count - 1 can overflow, use type long
* long to store it.
@@ -1990,6 +2004,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
nxt->sequence++;
}
req->sequence -= span;
+add:
list_add(&req->list, entry);
spin_unlock_irq(&ctx->completion_lock);
@@ -2283,6 +2298,7 @@ static bool io_op_needs_file(const struct io_uring_sqe *sqe)
switch (op) {
case IORING_OP_NOP:
case IORING_OP_POLL_REMOVE:
+ case IORING_OP_TIMEOUT:
return false;
default:
return true;
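
Together these hunks let IORING_OP_TIMEOUT act as a pure timer: when the sqe's off field (the completion count) is zero, the request is flagged REQ_F_TIMEOUT_NOSEQ, parked at the tail of the timeout list, skipped by the insertion sort, and fires purely on the clock. From userspace the pure form looks like this, sketched with liburing (which passes count == 0 through to off):

    struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

    /* count == 0: a pure relative timeout, not tied to other completions */
    io_uring_prep_timeout(sqe, &ts, 0, 0);
    io_uring_submit(&ring);
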
diff --git a/fs/namespace.c b/fs/namespace.c
index fe0e9e1410fe..2adfe7b166a3 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2478,8 +2478,10 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *
time64_to_tm(sb->s_time_max, 0, &tm);
- pr_warn("Mounted %s file system at %s supports timestamps until %04ld (0x%llx)\n",
- sb->s_type->name, mntpath,
+ pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n",
+ sb->s_type->name,
+ is_mounted(mnt) ? "remounted" : "mounted",
+ mntpath,
tm.tm_year+1900, (unsigned long long)sb->s_time_max);
free_page((unsigned long)buf);
@@ -2764,14 +2766,11 @@ static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
if (IS_ERR(mnt))
return PTR_ERR(mnt);
- error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags);
- if (error < 0) {
- mntput(mnt);
- return error;
- }
-
mnt_warn_timestamp_expiry(mountpoint, mnt);
+ error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags);
+ if (error < 0)
+ mntput(mnt);
return error;
}
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 53939bf9d7d2..9876db52913a 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2098,53 +2098,89 @@ static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
return 0;
}
-static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
- struct file *file,
- loff_t pos, size_t count,
- int *meta_level)
+static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
+ struct buffer_head **di_bh,
+ int meta_level,
+ int overwrite_io,
+ int write_sem,
+ int wait)
{
- int ret;
- struct buffer_head *di_bh = NULL;
- u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
- u32 clusters =
- ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
+ int ret = 0;
- ret = ocfs2_inode_lock(inode, &di_bh, 1);
- if (ret) {
- mlog_errno(ret);
+ if (wait)
+ ret = ocfs2_inode_lock(inode, NULL, meta_level);
+ else
+ ret = ocfs2_try_inode_lock(inode,
+ overwrite_io ? NULL : di_bh, meta_level);
+ if (ret < 0)
goto out;
+
+ if (wait) {
+ if (write_sem)
+ down_write(&OCFS2_I(inode)->ip_alloc_sem);
+ else
+ down_read(&OCFS2_I(inode)->ip_alloc_sem);
+ } else {
+ if (write_sem)
+ ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
+ else
+ ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);
+
+ if (!ret) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
}
- *meta_level = 1;
+ return ret;
- ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
- if (ret)
- mlog_errno(ret);
+out_unlock:
+ brelse(*di_bh);
+ ocfs2_inode_unlock(inode, meta_level);
out:
- brelse(di_bh);
return ret;
}
+static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
+ struct buffer_head **di_bh,
+ int meta_level,
+ int write_sem)
+{
+ if (write_sem)
+ up_write(&OCFS2_I(inode)->ip_alloc_sem);
+ else
+ up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ brelse(*di_bh);
+ *di_bh = NULL;
+
+ if (meta_level >= 0)
+ ocfs2_inode_unlock(inode, meta_level);
+}
+
static int ocfs2_prepare_inode_for_write(struct file *file,
loff_t pos, size_t count, int wait)
{
int ret = 0, meta_level = 0, overwrite_io = 0;
+ int write_sem = 0;
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = d_inode(dentry);
struct buffer_head *di_bh = NULL;
+ u32 cpos;
+ u32 clusters;
/*
* We start with a read level meta lock and only jump to an ex
* if we need to make modifications here.
*/
for(;;) {
- if (wait)
- ret = ocfs2_inode_lock(inode, NULL, meta_level);
- else
- ret = ocfs2_try_inode_lock(inode,
- overwrite_io ? NULL : &di_bh, meta_level);
+ ret = ocfs2_inode_lock_for_extent_tree(inode,
+ &di_bh,
+ meta_level,
+ overwrite_io,
+ write_sem,
+ wait);
if (ret < 0) {
- meta_level = -1;
if (ret != -EAGAIN)
mlog_errno(ret);
goto out;
@@ -2156,15 +2192,8 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
*/
if (!wait && !overwrite_io) {
overwrite_io = 1;
- if (!down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem)) {
- ret = -EAGAIN;
- goto out_unlock;
- }
ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
- brelse(di_bh);
- di_bh = NULL;
- up_read(&OCFS2_I(inode)->ip_alloc_sem);
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
@@ -2183,7 +2212,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
* set inode->i_size at the end of a write. */
if (should_remove_suid(dentry)) {
if (meta_level == 0) {
- ocfs2_inode_unlock(inode, meta_level);
+ ocfs2_inode_unlock_for_extent_tree(inode,
+ &di_bh,
+ meta_level,
+ write_sem);
meta_level = 1;
continue;
}
@@ -2197,18 +2229,32 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
ret = ocfs2_check_range_for_refcount(inode, pos, count);
if (ret == 1) {
- ocfs2_inode_unlock(inode, meta_level);
- meta_level = -1;
-
- ret = ocfs2_prepare_inode_for_refcount(inode,
- file,
- pos,
- count,
- &meta_level);
+ ocfs2_inode_unlock_for_extent_tree(inode,
+ &di_bh,
+ meta_level,
+ write_sem);
+ ret = ocfs2_inode_lock_for_extent_tree(inode,
+ &di_bh,
+ meta_level,
+ overwrite_io,
+ 1,
+ wait);
+ write_sem = 1;
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
+ goto out;
+ }
+
+ cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+ clusters =
+ ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
+ ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
}
if (ret < 0) {
- mlog_errno(ret);
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
goto out_unlock;
}
@@ -2219,10 +2265,10 @@ out_unlock:
trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
pos, count, wait);
- brelse(di_bh);
-
- if (meta_level >= 0)
- ocfs2_inode_unlock(inode, meta_level);
+ ocfs2_inode_unlock_for_extent_tree(inode,
+ &di_bh,
+ meta_level,
+ write_sem);
out:
return ret;
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
index e94b19782c92..ce4103208619 100644
--- a/include/asm-generic/vdso/vsyscall.h
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -25,13 +25,6 @@ static __always_inline int __arch_get_clock_mode(struct timekeeper *tk)
}
#endif /* __arch_get_clock_mode */
-#ifndef __arch_use_vsyscall
-static __always_inline int __arch_use_vsyscall(struct vdso_data *vdata)
-{
- return 1;
-}
-#endif /* __arch_use_vsyscall */
-
#ifndef __arch_update_vsyscall
static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
struct timekeeper *tk)
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 01f514521687..7865e6b5d36c 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -44,7 +44,20 @@ struct drm_gem_shmem_object {
*/
unsigned int pages_use_count;
+ /**
+ * @madv: State for madvise
+ *
+ * 0 is active/inuse.
+ * A negative value means the object is purged.
+ * Positive values are driver specific and not used by the helpers.
+ */
int madv;
+
+ /**
+ * @madv_list: List entry for madvise tracking
+ *
+ * Typically used by drivers to track purgeable objects
+ */
struct list_head madv_list;
/**
diff --git a/include/drm/drm_self_refresh_helper.h b/include/drm/drm_self_refresh_helper.h
index 5b79d253fb46..520235c20708 100644
--- a/include/drm/drm_self_refresh_helper.h
+++ b/include/drm/drm_self_refresh_helper.h
@@ -13,7 +13,8 @@ struct drm_crtc;
void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state);
void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
- unsigned int commit_time_ms);
+ unsigned int commit_time_ms,
+ unsigned int new_self_refresh_mask);
int drm_self_refresh_helper_init(struct drm_crtc *crtc);
void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc);
diff --git a/include/dt-bindings/net/qca-ar803x.h b/include/dt-bindings/net/qca-ar803x.h
new file mode 100644
index 000000000000..9c046c7242ed
--- /dev/null
+++ b/include/dt-bindings/net/qca-ar803x.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Device Tree constants for the Qualcomm Atheros AR803x PHYs
+ */
+
+#ifndef _DT_BINDINGS_QCA_AR803X_H
+#define _DT_BINDINGS_QCA_AR803X_H
+
+#define AR803X_STRENGTH_FULL 0
+#define AR803X_STRENGTH_HALF 1
+#define AR803X_STRENGTH_QUARTER 2
+
+#endif
diff --git a/include/dt-bindings/net/ti-dp83869.h b/include/dt-bindings/net/ti-dp83869.h
new file mode 100644
index 000000000000..218b1a64e975
--- /dev/null
+++ b/include/dt-bindings/net/ti-dp83869.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Device Tree constants for the Texas Instruments DP83869 PHY
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * Copyright: (C) 2019 Texas Instruments, Inc.
+ */
+
+#ifndef _DT_BINDINGS_TI_DP83869_H
+#define _DT_BINDINGS_TI_DP83869_H
+
+/* PHY CTRL bits */
+#define DP83869_PHYCR_FIFO_DEPTH_3_B_NIB 0x00
+#define DP83869_PHYCR_FIFO_DEPTH_4_B_NIB 0x01
+#define DP83869_PHYCR_FIFO_DEPTH_6_B_NIB 0x02
+#define DP83869_PHYCR_FIFO_DEPTH_8_B_NIB 0x03
+
+/* IO_MUX_CFG - Clock output selection */
+#define DP83869_CLK_O_SEL_CHN_A_RCLK 0x0
+#define DP83869_CLK_O_SEL_CHN_B_RCLK 0x1
+#define DP83869_CLK_O_SEL_CHN_C_RCLK 0x2
+#define DP83869_CLK_O_SEL_CHN_D_RCLK 0x3
+#define DP83869_CLK_O_SEL_CHN_A_RCLK_DIV5 0x4
+#define DP83869_CLK_O_SEL_CHN_B_RCLK_DIV5 0x5
+#define DP83869_CLK_O_SEL_CHN_C_RCLK_DIV5 0x6
+#define DP83869_CLK_O_SEL_CHN_D_RCLK_DIV5 0x7
+#define DP83869_CLK_O_SEL_CHN_A_TCLK 0x8
+#define DP83869_CLK_O_SEL_CHN_B_TCLK 0x9
+#define DP83869_CLK_O_SEL_CHN_C_TCLK 0xa
+#define DP83869_CLK_O_SEL_CHN_D_TCLK 0xb
+#define DP83869_CLK_O_SEL_REF_CLK 0xc
+
+#define DP83869_RGMII_COPPER_ETHERNET 0x00
+#define DP83869_RGMII_1000_BASE 0x01
+#define DP83869_RGMII_100_BASE 0x02
+#define DP83869_RGMII_SGMII_BRIDGE 0x03
+#define DP83869_1000M_MEDIA_CONVERT 0x04
+#define DP83869_100M_MEDIA_CONVERT 0x05
+#define DP83869_SGMII_COPPER_ETHERNET 0x06
+
+#endif
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index 8339071ab08b..e20a0cd09ba5 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -65,5 +65,6 @@ extern void can_rx_unregister(struct net *net, struct net_device *dev,
void *data);
extern int can_send(struct sk_buff *skb, int loop);
+void can_sock_destruct(struct sock *sk);
#endif /* !_CAN_CORE_H */
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h
deleted file mode 100644
index 9e5ac27fb6c1..000000000000
--- a/include/linux/can/platform/mcp251x.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CAN_PLATFORM_MCP251X_H
-#define _CAN_PLATFORM_MCP251X_H
-
-/*
- *
- * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
- *
- */
-
-#include <linux/spi/spi.h>
-
-/*
- * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
- * @oscillator_frequency: - oscillator frequency in Hz
- */
-
-struct mcp251x_platform_data {
- unsigned long oscillator_frequency;
-};
-
-#endif /* !_CAN_PLATFORM_MCP251X_H */
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index 01219f2902bf..1b78a0cfb615 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -15,9 +15,9 @@
struct can_rx_offload {
struct net_device *dev;
- unsigned int (*mailbox_read)(struct can_rx_offload *offload,
- struct can_frame *cf,
- u32 *timestamp, unsigned int mb);
+ struct sk_buff *(*mailbox_read)(struct can_rx_offload *offload,
+ unsigned int mb, u32 *timestamp,
+ bool drop);
struct sk_buff_head skb_queue;
u32 skb_queue_len_max;
@@ -44,7 +44,6 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
unsigned int idx, u32 timestamp);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb);
-void can_rx_offload_reset(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index d0633ebdaa9c..bc6c879bd110 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -59,6 +59,11 @@ extern ssize_t cpu_show_l1tf(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_mds(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+extern ssize_t cpu_show_itlb_multihit(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -213,28 +218,7 @@ static inline int cpuhp_smt_enable(void) { return 0; }
static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
#endif
-/*
- * These are used for a global "mitigations=" cmdline option for toggling
- * optional CPU mitigations.
- */
-enum cpu_mitigations {
- CPU_MITIGATIONS_OFF,
- CPU_MITIGATIONS_AUTO,
- CPU_MITIGATIONS_AUTO_NOSMT,
-};
-
-extern enum cpu_mitigations cpu_mitigations;
-
-/* mitigations=off */
-static inline bool cpu_mitigations_off(void)
-{
- return cpu_mitigations == CPU_MITIGATIONS_OFF;
-}
-
-/* mitigations=auto,nosmt */
-static inline bool cpu_mitigations_auto_nosmt(void)
-{
- return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
-}
+extern bool cpu_mitigations_off(void);
+extern bool cpu_mitigations_auto_nosmt(void);
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/icmp.h b/include/linux/icmp.h
index 2d8aaf7d4b9e..81ca84ce3119 100644
--- a/include/linux/icmp.h
+++ b/include/linux/icmp.h
@@ -20,4 +20,19 @@ static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
return (struct icmphdr *)skb_transport_header(skb);
}
+
+static inline bool icmp_is_err(int type)
+{
+ switch (type) {
+ case ICMP_DEST_UNREACH:
+ case ICMP_SOURCE_QUENCH:
+ case ICMP_REDIRECT:
+ case ICMP_TIME_EXCEEDED:
+ case ICMP_PARAMETERPROB:
+ return true;
+ }
+
+ return false;
+}
+
#endif /* _LINUX_ICMP_H */
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index a8f888976137..ef1cbb5f454f 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -46,4 +46,18 @@ extern void icmpv6_flow_init(struct sock *sk,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
int oif);
+
+static inline bool icmpv6_is_err(int type)
+{
+ switch (type) {
+ case ICMPV6_DEST_UNREACH:
+ case ICMPV6_PKT_TOOBIG:
+ case ICMPV6_TIME_EXCEED:
+ case ICMPV6_PARAMPROB:
+ return true;
+ }
+
+ return false;
+}
+
#endif
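Both helpers classify a message type as an error that carries the offending packet, so callers can branch without open-coding the type lists. A trivial sketch of combined use (the my_ wrapper is illustrative):

    #include <linux/socket.h>
    #include <linux/icmp.h>
    #include <linux/icmpv6.h>

    static bool my_icmp_is_err(int family, int type)
    {
            return family == AF_INET ? icmp_is_err(type) : icmpv6_is_err(type);
    }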
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 4ec8986e5dfb..ac6e946b6767 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -185,7 +185,7 @@ static inline void idr_preload_end(void)
* is convenient for a "not found" value.
*/
#define idr_for_each_entry(idr, entry, id) \
- for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
+ for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)
/**
* idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 719fc3e15ea4..d41c521a39da 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -966,6 +966,7 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
+bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
struct kvm_irq_ack_notifier {
struct hlist_node link;
@@ -1382,4 +1383,10 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
+typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
+
+int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
+ uintptr_t data, const char *name,
+ struct task_struct **thread_ptr);
+
#endif
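kvm_vm_create_worker_thread() spawns a kthread tied to a VM and hands it the kvm pointer plus an opaque data word. A hedged sketch of defining and starting such a worker; the function names and the stop-polling loop policy are illustrative:

    static int my_vm_worker(struct kvm *kvm, uintptr_t data)
    {
            while (!kthread_should_stop())
                    my_do_background_scan(kvm, data);       /* hypothetical */
            return 0;
    }

    static int my_start_worker(struct kvm *kvm)
    {
            struct task_struct *thread;

            return kvm_vm_create_worker_thread(kvm, my_vm_worker, 0,
                                               "my-kvm-worker", &thread);
    }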
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 0ebb105eb261..4c75dae8dd29 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -119,6 +119,7 @@ extern struct memory_block *find_memory_block(struct mem_section *);
typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
extern int walk_memory_blocks(unsigned long start, unsigned long size,
void *arg, walk_memory_blocks_func_t func);
+extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 3e80f03a387f..1884513aac90 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1121,6 +1121,11 @@ static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_PF;
}
+static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
+{
+ return dev->coredev_type == MLX5_COREDEV_VF;
+}
+
static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu;
@@ -1186,4 +1191,15 @@ enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
+static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+
+ devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ &val);
+ return val.vbool;
+}
+
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 724d276ea133..4e5b84e66822 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -80,7 +80,8 @@ enum mlx5_flow_namespace_type {
enum {
FDB_BYPASS_PATH,
- FDB_FAST_PATH,
+ FDB_TC_OFFLOAD,
+ FDB_FT_OFFLOAD,
FDB_SLOW_PATH,
};
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 0836fe232f97..4f912d4e67bc 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1153,7 +1153,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_srq[0x5];
u8 reserved_at_b0[0x10];
- u8 reserved_at_c0[0x8];
+ u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
u8 reserved_at_d0[0xb];
u8 log_max_cq[0x5];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cc292273e6ba..a2adf95b3f9c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -695,11 +695,6 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
extern void kvfree(const void *addr);
-static inline atomic_t *compound_mapcount_ptr(struct page *page)
-{
- return &page[1].compound_mapcount;
-}
-
static inline int compound_mapcount(struct page *page)
{
VM_BUG_ON_PAGE(!PageCompound(page), page);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 2222fa795284..270aa8fd2800 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -221,6 +221,11 @@ struct page {
#endif
} _struct_page_alignment;
+static inline atomic_t *compound_mapcount_ptr(struct page *page)
+{
+ return &page[1].compound_mapcount;
+}
+
/*
* Used for sizing the vmemmap region on some architectures
*/
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1f140a6b66df..9e6fb8524d91 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -848,6 +848,7 @@ enum tc_setup_type {
TC_SETUP_ROOT_QDISC,
TC_SETUP_QDISC_GRED,
TC_SETUP_QDISC_TAPRIO,
+ TC_SETUP_FT,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -2396,11 +2397,23 @@ struct pcpu_sw_netstats {
} __aligned(4 * sizeof(u64));
struct pcpu_lstats {
- u64 packets;
- u64 bytes;
+ u64_stats_t packets;
+ u64_stats_t bytes;
struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
+void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
+
+static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
+{
+ struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
+
+ u64_stats_update_begin(&lstats->syncp);
+ u64_stats_add(&lstats->bytes, len);
+ u64_stats_inc(&lstats->packets);
+ u64_stats_update_end(&lstats->syncp);
+}
+
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
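With pcpu_lstats converted to u64_stats_t, writers go through dev_lstats_add() and readers must pair u64_stats_read() with the fetch/retry loop. The real dev_lstats_read() lives in net/core/dev.c; this is a sketch consistent with the declarations above:

    static void my_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes)
    {
            int cpu;

            *packets = 0;
            *bytes = 0;
            for_each_possible_cpu(cpu) {
                    const struct pcpu_lstats *lstats = per_cpu_ptr(dev->lstats, cpu);
                    u64 tpackets, tbytes;
                    unsigned int start;

                    do {
                            start = u64_stats_fetch_begin_irq(&lstats->syncp);
                            tpackets = u64_stats_read(&lstats->packets);
                            tbytes = u64_stats_read(&lstats->bytes);
                    } while (u64_stats_fetch_retry_irq(&lstats->syncp, start));

                    *packets += tpackets;
                    *bytes += tbytes;
            }
    }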
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 6aeaea1775e6..71bbfcf3adcd 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -6,15 +6,18 @@
#ifndef __LINUX_OF_NET_H
#define __LINUX_OF_NET_H
+#include <linux/phy.h>
+
#ifdef CONFIG_OF_NET
#include <linux/of.h>
struct net_device;
-extern int of_get_phy_mode(struct device_node *np);
+extern int of_get_phy_mode(struct device_node *np, phy_interface_t *interface);
extern const void *of_get_mac_address(struct device_node *np);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
-static inline int of_get_phy_mode(struct device_node *np)
+static inline int of_get_phy_mode(struct device_node *np,
+ phy_interface_t *interface)
{
return -ENODEV;
}
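The phy mode now comes back through an out-parameter and the return value is a plain error code, making the failure path explicit instead of overloading negative phy_interface_t values. A typical converted call site (the fallback choice is the caller's):

    phy_interface_t interface;
    int err;

    err = of_get_phy_mode(np, &interface);
    if (err)
            interface = PHY_INTERFACE_MODE_NA;      /* or propagate err */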
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f91cb8898ff0..1bf83c8fcaa7 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -622,12 +622,28 @@ static inline int PageTransCompound(struct page *page)
*
* Unlike PageTransCompound, this is safe to be called only while
* split_huge_pmd() cannot run from under us, like if protected by the
- * MMU notifier, otherwise it may result in page->_mapcount < 0 false
+ * MMU notifier, otherwise it may result in page->_mapcount check false
* positives.
+ *
+ * We have to treat page cache THP differently since every subpage of it
+ * would get _mapcount inc'ed once it is PMD mapped. But, it may be PTE
+ * mapped in the current process, so compare the subpage's _mapcount with
+ * the head page's compound_mapcount to filter out the PTE mapped case.
*/
static inline int PageTransCompoundMap(struct page *page)
{
- return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
+ struct page *head;
+
+ if (!PageTransCompound(page))
+ return 0;
+
+ if (PageAnon(page))
+ return atomic_read(&page->_mapcount) < 0;
+
+ head = compound_head(page);
+ /* File THP is PMD mapped and not PTE mapped */
+ return atomic_read(&page->_mapcount) ==
+ atomic_read(compound_mapcount_ptr(head));
}
/*
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 78436d58ce7c..124516fe2763 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -203,6 +203,8 @@ static inline const char *phy_modes(phy_interface_t interface)
struct device;
struct phylink;
+struct sfp_bus;
+struct sfp_upstream_ops;
struct sk_buff;
/*
@@ -342,6 +344,8 @@ struct phy_c45_device_ids {
* dev_flags: Device-specific flags used by the PHY driver.
* irq: IRQ number of the PHY's interrupt (-1 if none)
* phy_timer: The timer for handling the state machine
+ * sfp_bus_attached: flag indicating whether the SFP bus has been attached
+ * sfp_bus: SFP bus attached to this PHY's fiber port
* attached_dev: The attached enet driver's device instance ptr
* adjust_link: Callback for the enet controller to respond to
* changes in the link state.
@@ -432,6 +436,9 @@ struct phy_device {
struct mutex lock;
+ /* This may be modified under the rtnl lock */
+ bool sfp_bus_attached;
+ struct sfp_bus *sfp_bus;
struct phylink *phylink;
struct net_device *attached_dev;
@@ -1020,6 +1027,10 @@ int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
int __phy_resume(struct phy_device *phydev);
int phy_loopback(struct phy_device *phydev, bool enable);
+void phy_sfp_attach(void *upstream, struct sfp_bus *bus);
+void phy_sfp_detach(void *upstream, struct sfp_bus *bus);
+int phy_sfp_probe(struct phy_device *phydev,
+ const struct sfp_upstream_ops *ops);
struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phy_interface_t interface);
struct phy_device *phy_find_first(struct mii_bus *bus);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index b5116013f27e..63e62372443a 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -316,24 +316,6 @@ radix_tree_iter_lookup(const struct radix_tree_root *root,
}
/**
- * radix_tree_iter_find - find a present entry
- * @root: radix tree root
- * @iter: iterator state
- * @index: start location
- *
- * This function returns the slot containing the entry with the lowest index
- * which is at least @index. If @index is larger than any present entry, this
- * function returns NULL. The @iter is updated to describe the entry found.
- */
-static inline void __rcu **
-radix_tree_iter_find(const struct radix_tree_root *root,
- struct radix_tree_iter *iter, unsigned long index)
-{
- radix_tree_iter_init(iter, index);
- return radix_tree_next_chunk(root, iter, 0);
-}
-
-/**
* radix_tree_iter_retry - retry this chunk of the iteration
* @iter: iterator state
*
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index 9326d671b6e6..eaae6b4e9f24 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -7,7 +7,7 @@
struct reset_controller_dev;
/**
- * struct reset_control_ops
+ * struct reset_control_ops - reset controller driver callbacks
*
* @reset: for self-deasserting resets, does all necessary
* things to reset the device
@@ -33,7 +33,7 @@ struct of_phandle_args;
* @provider: name of the reset controller device controlling this reset line
* @index: ID of the reset controller in the reset controller device
* @dev_id: name of the device associated with this reset line
- * @con_id name of the reset line (can be NULL)
+ * @con_id: name of the reset line (can be NULL)
*/
struct reset_control_lookup {
struct list_head list;
diff --git a/include/linux/reset.h b/include/linux/reset.h
index e7793fc0fa93..eb597e8aa430 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -143,7 +143,7 @@ static inline int device_reset_optional(struct device *dev)
* If this function is called more than once for the same reset_control it will
* return -EBUSY.
*
- * See reset_control_get_shared for details on shared references to
+ * See reset_control_get_shared() for details on shared references to
* reset-controls.
*
* Use of id names is optional.
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 355a08a76fd4..3b35efd85bb1 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -508,10 +508,11 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
u8 *data);
void sfp_upstream_start(struct sfp_bus *bus);
void sfp_upstream_stop(struct sfp_bus *bus);
-struct sfp_bus *sfp_register_upstream_node(struct fwnode_handle *fwnode,
- void *upstream,
- const struct sfp_upstream_ops *ops);
-void sfp_unregister_upstream(struct sfp_bus *bus);
+void sfp_bus_put(struct sfp_bus *bus);
+struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode);
+int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
+ const struct sfp_upstream_ops *ops);
+void sfp_bus_del_upstream(struct sfp_bus *bus);
#else
static inline int sfp_parse_port(struct sfp_bus *bus,
const struct sfp_eeprom_id *id,
@@ -553,14 +554,22 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus)
{
}
-static inline struct sfp_bus *sfp_register_upstream_node(
- struct fwnode_handle *fwnode, void *upstream,
- const struct sfp_upstream_ops *ops)
+static inline void sfp_bus_put(struct sfp_bus *bus)
+{
+}
+
+static inline struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
{
return NULL;
}
-static inline void sfp_unregister_upstream(struct sfp_bus *bus)
+static inline int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
+ const struct sfp_upstream_ops *ops)
+{
+ return 0;
+}
+
+static inline void sfp_bus_del_upstream(struct sfp_bus *bus)
{
}
#endif
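The old register/unregister pair is split so an upstream can look up and hold a bus reference independently of attaching to it. A hedged sketch of the new lifecycle, assuming sfp_bus_find_fwnode() may return NULL (no cage referenced) or an ERR_PTR, and that sfp_bus_add_upstream() takes its own reference; my_priv and my_sfp_ops are hypothetical:

    static int my_attach_sfp(struct my_priv *priv, struct fwnode_handle *fwnode)
    {
            struct sfp_bus *bus;
            int err;

            bus = sfp_bus_find_fwnode(fwnode);
            if (IS_ERR(bus))
                    return PTR_ERR(bus);
            if (!bus)
                    return 0;               /* no SFP cage described */

            err = sfp_bus_add_upstream(bus, priv, &my_sfp_ops);
            sfp_bus_put(bus);               /* drop the lookup reference */
            if (!err)
                    priv->sfp_bus = bus;
            return err;
    }

    static void my_detach_sfp(struct my_priv *priv)
    {
            if (priv->sfp_bus)
                    sfp_bus_del_upstream(priv->sfp_bus);
    }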
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 53238ac725a3..dfe02b658829 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1795,7 +1795,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
*/
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
- struct sk_buff *skb = list_->prev;
+ struct sk_buff *skb = READ_ONCE(list_->prev);
if (skb == (struct sk_buff *)list_)
skb = NULL;
@@ -1861,7 +1861,9 @@ static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
- /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+ /* See skb_queue_empty_lockless() and skb_peek_tail()
+ * for the opposite READ_ONCE()
+ */
WRITE_ONCE(newsk->next, next);
WRITE_ONCE(newsk->prev, prev);
WRITE_ONCE(next->prev, newsk);
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index fe80d537945d..6cb077b646a5 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -140,6 +140,11 @@ static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
}
}
+static inline u32 sk_msg_iter_dist(u32 start, u32 end)
+{
+ return end >= start ? end - start : end + (MAX_MSG_FRAGS - start);
+}
+
#define sk_msg_iter_var_prev(var) \
do { \
if (var == 0) \
@@ -199,9 +204,7 @@ static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
if (sk_msg_full(msg))
return MAX_MSG_FRAGS;
- return msg->sg.end >= msg->sg.start ?
- msg->sg.end - msg->sg.start :
- msg->sg.end + (MAX_MSG_FRAGS - msg->sg.start);
+ return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}
static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
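sk_msg_iter_dist() factors the ring-distance computation out of sk_msg_elem_used(). A quick worked example, assuming MAX_MSG_FRAGS == 16 for illustration: sk_msg_iter_dist(2, 5) == 3 (no wrap), while sk_msg_iter_dist(14, 3) == 3 + (16 - 14) == 5, covering the case where the end index wrapped past the start.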
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 86f9464c3f5d..d4bcd9387136 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -13,6 +13,7 @@
#define __STMMAC_PLATFORM_DATA
#include <linux/platform_device.h>
+#include <linux/phy.h>
#define MTL_MAX_RX_QUEUES 8
#define MTL_MAX_TX_QUEUES 8
@@ -132,7 +133,7 @@ struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
int interface;
- int phy_interface;
+ phy_interface_t phy_interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
struct device_node *phy_node;
struct device_node *phylink_node;
diff --git a/include/linux/sxgbe_platform.h b/include/linux/sxgbe_platform.h
index 267369110584..85ec745767bd 100644
--- a/include/linux/sxgbe_platform.h
+++ b/include/linux/sxgbe_platform.h
@@ -10,6 +10,8 @@
#ifndef __SXGBE_PLATFORM_H__
#define __SXGBE_PLATFORM_H__
+#include <linux/phy.h>
+
/* MDC Clock Selection define*/
#define SXGBE_CSR_100_150M 0x0 /* MDC = clk_scr_i/62 */
#define SXGBE_CSR_150_250M 0x1 /* MDC = clk_scr_i/102 */
@@ -38,7 +40,7 @@ struct sxgbe_plat_data {
char *phy_bus_name;
int bus_id;
int phy_addr;
- int interface;
+ phy_interface_t interface;
struct sxgbe_mdio_bus_data *mdio_bus_data;
struct sxgbe_dma_cfg *dma_cfg;
int clk_csr;
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index a27604f99ed0..9de5c10293f5 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -40,8 +40,8 @@
* spin_lock_bh(...) or other synchronization to get exclusive access
* ...
* u64_stats_update_begin(&stats->syncp);
- * stats->bytes64 += len; // non atomic operation
- * stats->packets64++; // non atomic operation
+ * u64_stats_add(&stats->bytes64, len); // non atomic operation
+ * u64_stats_inc(&stats->packets64); // non atomic operation
* u64_stats_update_end(&stats->syncp);
*
* While a consumer (reader) should use following template to get consistent
@@ -52,8 +52,8 @@
*
* do {
* start = u64_stats_fetch_begin(&stats->syncp);
- * tbytes = stats->bytes64; // non atomic operation
- * tpackets = stats->packets64; // non atomic operation
+ * tbytes = u64_stats_read(&stats->bytes64); // non atomic operation
+ * tpackets = u64_stats_read(&stats->packets64); // non atomic operation
* } while (u64_stats_fetch_retry(&stats->syncp, start));
*
*
@@ -68,6 +68,49 @@ struct u64_stats_sync {
#endif
};
+#if BITS_PER_LONG == 64
+#include <asm/local64.h>
+
+typedef struct {
+ local64_t v;
+} u64_stats_t;
+
+static inline u64 u64_stats_read(const u64_stats_t *p)
+{
+ return local64_read(&p->v);
+}
+
+static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
+{
+ local64_add(val, &p->v);
+}
+
+static inline void u64_stats_inc(u64_stats_t *p)
+{
+ local64_inc(&p->v);
+}
+
+#else
+
+typedef struct {
+ u64 v;
+} u64_stats_t;
+
+static inline u64 u64_stats_read(const u64_stats_t *p)
+{
+ return p->v;
+}
+
+static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
+{
+ p->v += val;
+}
+
+static inline void u64_stats_inc(u64_stats_t *p)
+{
+ p->v++;
+}
+#endif
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
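u64_stats_t is deliberately opaque: on 64-bit it wraps local64_t so updates stay cheap and tear-free, and on 32-bit it remains a plain u64 guarded by the seqcount in u64_stats_sync. Wrapping the counter in a struct also keeps unmarked ++/+= updates, the kind KCSAN reports as data races, from creeping back in. A minimal declaration sketch:

    struct my_stats {
            u64_stats_t             rx_packets;
            u64_stats_t             rx_bytes;
            struct u64_stats_sync   syncp;
    };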
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 07875ccc7bb5..71c81e0dc8f2 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -7,9 +7,6 @@
#include <net/sock.h>
#include <net/af_vsock.h>
-#define VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE 128
-#define VIRTIO_VSOCK_DEFAULT_BUF_SIZE (1024 * 256)
-#define VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE (1024 * 256)
#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
@@ -25,11 +22,6 @@ enum {
struct virtio_vsock_sock {
struct vsock_sock *vsk;
- /* Protected by lock_sock(sk_vsock(trans->vsk)) */
- u32 buf_size;
- u32 buf_size_min;
- u32 buf_size_max;
-
spinlock_t tx_lock;
spinlock_t rx_lock;
@@ -92,12 +84,6 @@ s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
int virtio_transport_do_socket_init(struct vsock_sock *vsk,
struct vsock_sock *psk);
-u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk);
-u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk);
-u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk);
-void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val);
-void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val);
-void virtio_transport_set_max_buffer_size(struct vsock_sock *vs, u64 val);
int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
size_t target,
@@ -124,6 +110,7 @@ int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
ssize_t written, struct vsock_transport_send_notify_data *data);
+void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val);
u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk);
bool virtio_transport_stream_is_active(struct vsock_sock *vsk);
@@ -150,7 +137,8 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
void virtio_transport_destruct(struct vsock_sock *vsk);
-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt);
+void virtio_transport_recv_pkt(struct virtio_transport *t,
+ struct virtio_vsock_pkt *pkt);
void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
diff --git a/include/linux/vm_sockets.h b/include/linux/vm_sockets.h
deleted file mode 100644
index 33f1a2ecd905..000000000000
--- a/include/linux/vm_sockets.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * VMware vSockets Driver
- *
- * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
- */
-
-#ifndef _VM_SOCKETS_H
-#define _VM_SOCKETS_H
-
-#include <uapi/linux/vm_sockets.h>
-
-int vm_sockets_get_local_cid(void);
-
-#endif /* _VM_SOCKETS_H */
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
index acd9fafe4fc6..f28907345c80 100644
--- a/include/linux/vmw_vmci_api.h
+++ b/include/linux/vmw_vmci_api.h
@@ -19,6 +19,7 @@
struct msghdr;
typedef void (vmci_device_shutdown_fn) (void *device_registration,
void *user_data);
+typedef void (*vmci_vsock_cb) (bool is_host);
int vmci_datagram_create_handle(u32 resource_id, u32 flags,
vmci_datagram_recv_cb recv_cb,
@@ -37,6 +38,7 @@ int vmci_doorbell_destroy(struct vmci_handle handle);
int vmci_doorbell_notify(struct vmci_handle handle, u32 priv_flags);
u32 vmci_get_context_id(void);
bool vmci_is_context_owner(u32 context_id, kuid_t uid);
+int vmci_register_vsock_callback(vmci_vsock_cb callback);
int vmci_event_subscribe(u32 event,
vmci_event_cb callback, void *callback_data,
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 0495bdc034d2..71347a90a9d1 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -23,7 +23,6 @@ struct tc_action_ops;
struct tc_action {
const struct tc_action_ops *ops;
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
- __u32 order;
struct tcf_idrinfo *idrinfo;
u32 tcfa_index;
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 80ea0f93d3f7..4206dc6d813f 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -10,7 +10,7 @@
#include <linux/kernel.h>
#include <linux/workqueue.h>
-#include <linux/vm_sockets.h>
+#include <uapi/linux/vm_sockets.h>
#include "vsock_addr.h"
@@ -27,6 +27,7 @@ extern spinlock_t vsock_table_lock;
struct vsock_sock {
/* sk must be the first member. */
struct sock sk;
+ const struct vsock_transport *transport;
struct sockaddr_vm local_addr;
struct sockaddr_vm remote_addr;
/* Links for the global tables of bound and connected sockets. */
@@ -64,16 +65,18 @@ struct vsock_sock {
bool sent_request;
bool ignore_connecting_rst;
+ /* Protected by lock_sock(sk) */
+ u64 buffer_size;
+ u64 buffer_min_size;
+ u64 buffer_max_size;
+
/* Private to transport. */
void *trans;
};
s64 vsock_stream_has_data(struct vsock_sock *vsk);
s64 vsock_stream_has_space(struct vsock_sock *vsk);
-struct sock *__vsock_create(struct net *net,
- struct socket *sock,
- struct sock *parent,
- gfp_t priority, unsigned short type, int kern);
+struct sock *vsock_create_connected(struct sock *parent);
/**** TRANSPORT ****/
@@ -88,7 +91,17 @@ struct vsock_transport_send_notify_data {
u64 data2; /* Transport-defined. */
};
+/* Transport features flags */
+/* Transport provides host->guest communication */
+#define VSOCK_TRANSPORT_F_H2G 0x00000001
+/* Transport provides guest->host communication */
+#define VSOCK_TRANSPORT_F_G2H 0x00000002
+/* Transport provides DGRAM communication */
+#define VSOCK_TRANSPORT_F_DGRAM 0x00000004
+
struct vsock_transport {
+ struct module *module;
+
/* Initialize/tear-down socket. */
int (*init)(struct vsock_sock *, struct vsock_sock *);
void (*destruct)(struct vsock_sock *);
@@ -139,33 +152,23 @@ struct vsock_transport {
struct vsock_transport_send_notify_data *);
int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t,
struct vsock_transport_send_notify_data *);
+ /* sk_lock held by the caller */
+ void (*notify_buffer_size)(struct vsock_sock *, u64 *);
/* Shutdown. */
int (*shutdown)(struct vsock_sock *, int);
- /* Buffer sizes. */
- void (*set_buffer_size)(struct vsock_sock *, u64);
- void (*set_min_buffer_size)(struct vsock_sock *, u64);
- void (*set_max_buffer_size)(struct vsock_sock *, u64);
- u64 (*get_buffer_size)(struct vsock_sock *);
- u64 (*get_min_buffer_size)(struct vsock_sock *);
- u64 (*get_max_buffer_size)(struct vsock_sock *);
-
/* Addressing. */
u32 (*get_local_cid)(void);
};
/**** CORE ****/
-int __vsock_core_init(const struct vsock_transport *t, struct module *owner);
-static inline int vsock_core_init(const struct vsock_transport *t)
-{
- return __vsock_core_init(t, THIS_MODULE);
-}
-void vsock_core_exit(void);
+int vsock_core_register(const struct vsock_transport *t, int features);
+void vsock_core_unregister(const struct vsock_transport *t);
/* The transport may downcast this to access transport-specific functions */
-const struct vsock_transport *vsock_core_get_transport(void);
+const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk);
/**** UTILS ****/
@@ -193,6 +196,8 @@ struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
struct sockaddr_vm *dst);
void vsock_remove_sock(struct vsock_sock *vsk);
void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
+int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk);
+bool vsock_find_cid(unsigned int cid);
/**** TAP ****/
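Transport registration now carries capability flags, and several transports can coexist: the core picks one per socket via vsock_assign_transport() instead of relying on a single global transport. A hedged registration sketch; the callback names are illustrative and a real ops table must fill in the remaining mandatory callbacks:

    static struct vsock_transport my_transport = {
            .module         = THIS_MODULE,
            .init           = my_sock_init,         /* hypothetical ops */
            .destruct       = my_sock_destruct,
            .get_local_cid  = my_get_local_cid,
            /* ... remaining callbacks ... */
    };

    static int __init my_vsock_init(void)
    {
            /* This transport handles guest->host traffic only. */
            return vsock_core_register(&my_transport, VSOCK_TRANSPORT_F_G2H);
    }

    static void __exit my_vsock_exit(void)
    {
            vsock_core_unregister(&my_transport);
    }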
diff --git a/include/net/arp.h b/include/net/arp.h
index c8f580a0e6b1..4950191f6b2b 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -57,8 +57,8 @@ static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key)
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
}
rcu_read_unlock_bh();
}
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 1afc125014da..3d56b026bb9e 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -159,7 +159,6 @@ struct slave {
unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
s8 link; /* one of BOND_LINK_XXXX */
s8 link_new_state; /* one of BOND_LINK_XXXX */
- s8 new_link;
u8 backup:1, /* indicates backup slave. Value corresponds with
BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
inactive:1, /* indicates inactive slave */
@@ -549,7 +548,7 @@ static inline void bond_propose_link_state(struct slave *slave, int state)
static inline void bond_commit_link_state(struct slave *slave, bool notify)
{
- if (slave->link == slave->link_new_state)
+ if (slave->link_new_state == BOND_LINK_NOCHANGE)
return;
slave->link = slave->link_new_state;
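bond_commit_link_state() now keys off BOND_LINK_NOCHANGE, so whether a commit happens depends only on whether a state was proposed, not on whether the proposal happens to equal the current link state. A sketch of the propose/commit pairing the fix relies on (function names are illustrative):

    static void my_miimon_inspect(struct slave *slave, bool link_up)
    {
            bond_propose_link_state(slave,
                                    link_up ? BOND_LINK_UP : BOND_LINK_DOWN);
    }

    static void my_miimon_commit(struct slave *slave)
    {
            /* No-op unless a new state was proposed since the last commit. */
            bond_commit_link_state(slave, true);
    }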
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index ab6850bbba99..5ded77fad7fb 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -565,6 +565,7 @@ struct vif_params {
* with the get_key() callback, must be in little endian,
* length given by @seq_len.
* @seq_len: length of @seq.
+ * @vlan_id: vlan_id for VLAN group key (if nonzero)
* @mode: key install mode (RX_TX, NO_TX or SET_TX)
*/
struct key_params {
@@ -572,6 +573,7 @@ struct key_params {
const u8 *seq;
int key_len;
int seq_len;
+ u16 vlan_id;
u32 cipher;
enum nl80211_key_mode mode;
};
@@ -1124,6 +1126,7 @@ struct sta_txpwr {
* (bitmask of BIT(%NL80211_STA_FLAG_...))
* @listen_interval: listen interval or -1 for no change
* @aid: AID or zero for no change
+ * @vlan_id: VLAN ID for station (if nonzero)
* @peer_aid: mesh peer AID or zero for no change
* @plink_action: plink action to take
* @plink_state: set the peer link state for a station
@@ -1159,6 +1162,7 @@ struct station_parameters {
u32 sta_modify_mask;
int listen_interval;
u16 aid;
+ u16 vlan_id;
u16 peer_aid;
u8 supported_rates_len;
u8 plink_action;
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 6bf3b9e0595a..47f87b2fcf63 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -38,8 +38,9 @@ struct devlink {
struct device *dev;
possible_net_t _net;
struct mutex lock;
- bool reload_failed;
- bool registered;
+ u8 reload_failed:1,
+ reload_enabled:1,
+ registered:1;
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -401,6 +402,7 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
/* add new param generic ids above here*/
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -435,6 +437,9 @@ enum devlink_param_generic_id {
"reset_dev_on_drv_probe"
#define DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE DEVLINK_PARAM_TYPE_U8
+#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME "enable_roce"
+#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE DEVLINK_PARAM_TYPE_BOOL
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
@@ -569,6 +574,21 @@ enum devlink_trap_generic_id {
DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE,
DEVLINK_TRAP_GENERIC_ID_TTL_ERROR,
DEVLINK_TRAP_GENERIC_ID_TAIL_DROP,
+ DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET,
+ DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC,
+ DEVLINK_TRAP_GENERIC_ID_DIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_SIP_MC,
+ DEVLINK_TRAP_GENERIC_ID_SIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_MTU_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
+ DEVLINK_TRAP_GENERIC_ID_RPF,
+ DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS,
/* Add new generic trap IDs above */
__DEVLINK_TRAP_GENERIC_ID_MAX,
@@ -607,6 +627,36 @@ enum devlink_trap_group_generic_id {
"ttl_value_is_too_small"
#define DEVLINK_TRAP_GENERIC_NAME_TAIL_DROP \
"tail_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_NON_IP_PACKET \
+ "non_ip"
+#define DEVLINK_TRAP_GENERIC_NAME_UC_DIP_MC_DMAC \
+ "uc_dip_over_mc_dmac"
+#define DEVLINK_TRAP_GENERIC_NAME_DIP_LB \
+ "dip_is_loopback_address"
+#define DEVLINK_TRAP_GENERIC_NAME_SIP_MC \
+ "sip_is_mc"
+#define DEVLINK_TRAP_GENERIC_NAME_SIP_LB \
+ "sip_is_loopback_address"
+#define DEVLINK_TRAP_GENERIC_NAME_CORRUPTED_IP_HDR \
+ "ip_header_corrupted"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_SIP_BC \
+ "ipv4_sip_is_limited_bc"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_MC_DIP_RESERVED_SCOPE \
+ "ipv6_mc_dip_reserved_scope"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE \
+ "ipv6_mc_dip_interface_local_scope"
+#define DEVLINK_TRAP_GENERIC_NAME_MTU_ERROR \
+ "mtu_value_is_too_small"
+#define DEVLINK_TRAP_GENERIC_NAME_UNRESOLVED_NEIGH \
+ "unresolved_neigh"
+#define DEVLINK_TRAP_GENERIC_NAME_RPF \
+ "mc_reverse_path_forwarding"
+#define DEVLINK_TRAP_GENERIC_NAME_REJECT_ROUTE \
+ "reject_route"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_LPM_UNICAST_MISS \
+ "ipv4_lpm_miss"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_LPM_UNICAST_MISS \
+ "ipv6_lpm_miss"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \
"l2_drops"
@@ -779,6 +829,8 @@ void devlink_net_set(struct devlink *devlink, struct net *net);
struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size);
int devlink_register(struct devlink *devlink, struct device *dev);
void devlink_unregister(struct devlink *devlink);
+void devlink_reload_enable(struct devlink *devlink);
+void devlink_reload_disable(struct devlink *devlink);
void devlink_free(struct devlink *devlink);
int devlink_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,
@@ -919,8 +971,6 @@ int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value);
int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value);
int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value);
int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value);
-int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
- u16 value_len);
int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
bool value);
@@ -933,7 +983,7 @@ int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
const char *value);
int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
- const void *value, u16 value_len);
+ const void *value, u32 value_len);
struct devlink_health_reporter *
devlink_health_reporter_create(struct devlink *devlink,
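The struct devlink booleans collapse into a bitfield and gain reload_enabled; the new devlink_reload_enable()/devlink_reload_disable() helpers let a driver gate when userspace-triggered reloads are acceptable. A hedged probe/remove sketch; my_device is hypothetical, while priv_to_devlink() is the existing devlink helper:

    static int my_probe(struct my_device *mydev)
    {
            struct devlink *devlink = priv_to_devlink(mydev);
            int err;

            err = devlink_register(devlink, mydev->dev);
            if (err)
                    return err;

            /* Only allow "devlink reload" once probe can no longer fail. */
            devlink_reload_enable(devlink);
            return 0;
    }

    static void my_remove(struct my_device *mydev)
    {
            struct devlink *devlink = priv_to_devlink(mydev);

            devlink_reload_disable(devlink);
            devlink_unregister(devlink);
    }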
diff --git a/include/net/dsa.h b/include/net/dsa.h
index e4c697b95c70..6767dc3f66c0 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -42,6 +42,7 @@ struct phylink_link_state;
#define DSA_TAG_PROTO_8021Q_VALUE 12
#define DSA_TAG_PROTO_SJA1105_VALUE 13
#define DSA_TAG_PROTO_KSZ8795_VALUE 14
+#define DSA_TAG_PROTO_OCELOT_VALUE 15
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
@@ -59,6 +60,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_8021Q = DSA_TAG_PROTO_8021Q_VALUE,
DSA_TAG_PROTO_SJA1105 = DSA_TAG_PROTO_SJA1105_VALUE,
DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE,
+ DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE,
};
struct packet_type;
@@ -586,6 +588,22 @@ int dsa_devlink_params_register(struct dsa_switch *ds,
void dsa_devlink_params_unregister(struct dsa_switch *ds,
const struct devlink_param *params,
size_t params_count);
+int dsa_devlink_resource_register(struct dsa_switch *ds,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params);
+
+void dsa_devlink_resources_unregister(struct dsa_switch *ds);
+
+void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv);
+void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
+ u64 resource_id);
+
struct dsa_devlink_priv {
struct dsa_switch *ds;
};
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index b1063db63e66..f06b0239c32b 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -48,9 +48,12 @@ struct flow_dissector_key_tags {
};
struct flow_dissector_key_vlan {
- u16 vlan_id:12,
- vlan_dei:1,
- vlan_priority:3;
+ union {
+ u16 vlan_id:12,
+ vlan_dei:1,
+ vlan_priority:3;
+ __be16 vlan_tci;
+ };
__be16 vlan_tpid;
};
@@ -203,9 +206,11 @@ struct flow_dissector_key_ip {
/**
* struct flow_dissector_key_meta:
* @ingress_ifindex: ingress ifindex
+ * @ingress_iftype: ingress interface type
*/
struct flow_dissector_key_meta {
int ingress_ifindex;
+ u16 ingress_iftype;
};
/**
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 107c0d700ed6..38a9a3d1222b 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -313,7 +313,7 @@ static int fq_init(struct fq *fq, int flows_cnt)
fq->limit = 8192;
fq->memory_limit = 16 << 20; /* 16 MBytes */
- fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
+ fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
if (!fq->flows)
return -ENOMEM;
@@ -331,7 +331,7 @@ static void fq_reset(struct fq *fq,
for (i = 0; i < fq->flows_cnt; i++)
fq_flow_reset(fq, &fq->flows[i], free_func);
- kfree(fq->flows);
+ kvfree(fq->flows);
fq->flows = NULL;
}
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index ca23860adbb9..1424e02cef90 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -7,6 +7,12 @@
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
+/* Note: this used to be in include/uapi/linux/gen_stats.h */
+struct gnet_stats_basic_packed {
+ __u64 bytes;
+ __u64 packets;
+};
+
struct gnet_stats_basic_cpu {
struct gnet_stats_basic_packed bstats;
struct u64_stats_sync syncp;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index d69081c38788..c643a19dce96 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -312,7 +312,7 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected
* keep alive) changed.
* @BSS_CHANGED_MCAST_RATE: Multicast Rate setting changed for this interface
- * @BSS_CHANGED_FTM_RESPONDER: fime timing reasurement request responder
+ * @BSS_CHANGED_FTM_RESPONDER: fine timing measurement request responder
* functionality changed for this BSS (AP mode).
* @BSS_CHANGED_TWT: TWT status changed
* @BSS_CHANGED_HE_OBSS_PD: OBSS Packet Detection status changed.
@@ -967,6 +967,7 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
* @band: the band to transmit on (use for checking for races)
* @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
* @ack_frame_id: internal frame ID for TX status, used internally
+ * @tx_time_est: TX time estimate in units of 4us, used internally
* @control: union part for control data
* @control.rates: TX rates array to try
* @control.rts_cts_rate_idx: rate for RTS or CTS
@@ -1007,7 +1008,8 @@ struct ieee80211_tx_info {
u8 hw_queue;
- u16 ack_frame_id;
+ u16 ack_frame_id:6;
+ u16 tx_time_est:10;
union {
struct {
@@ -1059,7 +1061,7 @@ struct ieee80211_tx_info {
};
/**
- * struct ieee80211_tx_status - extended tx staus info for rate control
+ * struct ieee80211_tx_status - extended tx status info for rate control
*
* @sta: Station that the packet was transmitted for
* @info: Basic tx status information
@@ -1702,7 +1704,7 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
* %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
* @IEEE80211_KEY_FLAG_GENERATE_IV_MGMT: This flag should be set by the
 * driver for a CCMP/GCMP key to indicate that it requires IV generation
- * only for managment frames (MFP).
+ * only for management frames (MFP).
* @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
* driver for a key to indicate that sufficient tailroom must always
* be reserved for ICV or MIC, even when HW encryption is enabled.
@@ -1998,7 +2000,7 @@ struct ieee80211_sta {
*
* * If the skb is transmitted as part of a BA agreement, the
* A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
- * * If the skb is not part of a BA aggreement, the A-MSDU maximal
+ * * If the skb is not part of a BA agreement, the A-MSDU maximal
* size is min(max_amsdu_len, 7935) bytes.
*
* Both additional HT limits must be enforced by the low level
@@ -2626,7 +2628,7 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
* @hw: the hardware
* @skb: the skb
*
- * Free a transmit skb. Use this funtion when some failure
+ * Free a transmit skb. Use this function when some failure
* to transmit happened and thus status cannot be reported.
*/
void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
@@ -3187,13 +3189,13 @@ enum ieee80211_rate_control_changed {
*
* With the support for multi channel contexts and multi channel operations,
* remain on channel operations might be limited/deferred/aborted by other
- * flows/operations which have higher priority (and vise versa).
+ * flows/operations which have higher priority (and vice versa).
* Specifying the ROC type can be used by devices to prioritize the ROC
* operations compared to other operations/flows.
*
* @IEEE80211_ROC_TYPE_NORMAL: There are no special requirements for this ROC.
* @IEEE80211_ROC_TYPE_MGMT_TX: The remain on channel request is required
- * for sending managment frames offchannel.
+ * for sending management frames offchannel.
*/
enum ieee80211_roc_type {
IEEE80211_ROC_TYPE_NORMAL = 0,
@@ -5616,7 +5618,7 @@ void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw,
/**
* ieee80211_iter_chan_contexts_atomic - iterate channel contexts
- * @hw: pointre obtained from ieee80211_alloc_hw().
+ * @hw: pointer obtained from ieee80211_alloc_hw().
* @iter: iterator function
* @iter_data: data passed to iterator function
*
@@ -6364,7 +6366,7 @@ ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
* again.
*
* The API ieee80211_txq_may_transmit() also ensures that TXQ list will be
- * aligned aginst driver's own round-robin scheduler list. i.e it rotates
+ * aligned against driver's own round-robin scheduler list. i.e it rotates
 * the TXQ list until the requested node becomes the first entry
* in TXQ list. Thus both the TXQ list and driver's list are in sync. If this
* function returns %true, the driver is expected to schedule packets
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index b2f715ca0567..b5ebeb3b0de0 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -414,8 +414,8 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev,
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
}
rcu_read_unlock_bh();
}
@@ -431,8 +431,8 @@ static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
}
rcu_read_unlock_bh();
}
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 50a67bd6a434..6ad9ad47a9c5 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -439,8 +439,8 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
unsigned long now = jiffies;
- if (neigh->used != now)
- neigh->used = now;
+ if (READ_ONCE(neigh->used) != now)
+ WRITE_ONCE(neigh->used, now);
if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
return __neigh_event_send(neigh, skb);
return 0;
@@ -468,7 +468,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
do {
seq = read_seqbegin(&hh->hh_lock);
- hh_len = hh->hh_len;
+ hh_len = READ_ONCE(hh->hh_len);
if (likely(hh_len <= HH_DATA_MOD)) {
hh_alen = HH_DATA_MOD;
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 158514281a75..f0897b3c97fb 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -8,25 +8,43 @@
#include <linux/rcupdate.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/flow_offload.h>
#include <net/dst.h>
struct nf_flowtable;
+struct nf_flow_rule;
+struct flow_offload;
+enum flow_offload_tuple_dir;
struct nf_flowtable_type {
struct list_head list;
int family;
int (*init)(struct nf_flowtable *ft);
+ int (*setup)(struct nf_flowtable *ft,
+ struct net_device *dev,
+ enum flow_block_command cmd);
+ int (*action)(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule);
void (*free)(struct nf_flowtable *ft);
nf_hookfn *hook;
struct module *owner;
};
+enum nf_flowtable_flags {
+ NF_FLOWTABLE_HW_OFFLOAD = 0x1,
+};
+
struct nf_flowtable {
struct list_head list;
struct rhashtable rhashtable;
int priority;
const struct nf_flowtable_type *type;
struct delayed_work gc_work;
+ unsigned int flags;
+ struct flow_block flow_block;
+ possible_net_t net;
};
enum flow_offload_tuple_dir {
@@ -69,14 +87,22 @@ struct flow_offload_tuple_rhash {
#define FLOW_OFFLOAD_DNAT 0x2
#define FLOW_OFFLOAD_DYING 0x4
#define FLOW_OFFLOAD_TEARDOWN 0x8
+#define FLOW_OFFLOAD_HW 0x10
+#define FLOW_OFFLOAD_HW_DYING 0x20
+#define FLOW_OFFLOAD_HW_DEAD 0x40
+
+enum flow_offload_type {
+ NF_FLOW_OFFLOAD_UNSPEC = 0,
+ NF_FLOW_OFFLOAD_ROUTE,
+};
struct flow_offload {
struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
- u32 flags;
- union {
- /* Your private driver data here. */
- u32 timeout;
- };
+ struct nf_conn *ct;
+ u16 flags;
+ u16 type;
+ u32 timeout;
+ struct rcu_head rcu_head;
};
#define NF_FLOW_TIMEOUT (30 * HZ)
@@ -87,10 +113,12 @@ struct nf_flow_route {
} tuple[FLOW_OFFLOAD_DIR_MAX];
};
-struct flow_offload *flow_offload_alloc(struct nf_conn *ct,
- struct nf_flow_route *route);
+struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
void flow_offload_free(struct flow_offload *flow);
+int flow_offload_route_init(struct flow_offload *flow,
+ const struct nf_flow_route *route);
+
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload_tuple *tuple);
@@ -124,4 +152,25 @@ unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
#define MODULE_ALIAS_NF_FLOWTABLE(family) \
MODULE_ALIAS("nf-flowtable-" __stringify(family))
+void nf_flow_offload_add(struct nf_flowtable *flowtable,
+ struct flow_offload *flow);
+void nf_flow_offload_del(struct nf_flowtable *flowtable,
+ struct flow_offload *flow);
+void nf_flow_offload_stats(struct nf_flowtable *flowtable,
+ struct flow_offload *flow);
+
+void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
+int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd);
+int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule);
+int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule);
+
+int nf_flow_table_offload_init(void);
+void nf_flow_table_offload_exit(void);
+
#endif /* _NF_FLOW_TABLE_H */
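A flowtable type opts into hardware offload by wiring the new setup/action callbacks next to its existing init/free/hook ones. A sketch of what an IPv4 flowtable type could look like; nf_flow_table_init, nf_flow_table_free and nf_flow_offload_ip_hook are assumed to be declared elsewhere in this header:

    static struct nf_flowtable_type my_flowtable_ipv4 = {
            .family = NFPROTO_IPV4,
            .init   = nf_flow_table_init,
            .setup  = nf_flow_table_offload_setup,
            .action = nf_flow_rule_route_ipv4,
            .free   = nf_flow_table_free,
            .hook   = nf_flow_offload_ip_hook,
            .owner  = THIS_MODULE,
    };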
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 5bf569e1173b..fe7c50acc681 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -114,7 +114,7 @@ static inline void nft_reg_store8(u32 *dreg, u8 val)
*(u8 *)dreg = val;
}
-static inline u8 nft_reg_load8(u32 *sreg)
+static inline u8 nft_reg_load8(const u32 *sreg)
{
return *(u8 *)sreg;
}
@@ -125,7 +125,7 @@ static inline void nft_reg_store16(u32 *dreg, u16 val)
*(u16 *)dreg = val;
}
-static inline u16 nft_reg_load16(u32 *sreg)
+static inline u16 nft_reg_load16(const u32 *sreg)
{
return *(u16 *)sreg;
}
@@ -135,7 +135,7 @@ static inline void nft_reg_store64(u32 *dreg, u64 val)
put_unaligned(val, (u64 *)dreg);
}
-static inline u64 nft_reg_load64(u32 *sreg)
+static inline u64 nft_reg_load64(const u32 *sreg)
{
return get_unaligned((u64 *)sreg);
}
@@ -820,7 +820,8 @@ struct nft_expr_ops {
*/
struct nft_expr {
const struct nft_expr_ops *ops;
- unsigned char data[];
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(u64))));
};
static inline void *nft_expr_priv(const struct nft_expr *expr)
diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
index 03cf5856d76f..ea7d1d78b92d 100644
--- a/include/net/netfilter/nf_tables_offload.h
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -45,6 +45,7 @@ struct nft_flow_key {
struct flow_dissector_key_ip ip;
struct flow_dissector_key_vlan vlan;
struct flow_dissector_key_eth_addrs eth_addrs;
+ struct flow_dissector_key_meta meta;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct nft_flow_match {
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index bdc0f27b8514..d8d02e4188d1 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -89,6 +89,12 @@ struct netns_sctp {
*/
int pf_retrans;
+ /* Primary.Switchover.Max.Retrans sysctl value
+ * taken from:
+ * https://tools.ietf.org/html/rfc7829
+ */
+ int ps_retrans;
+
/*
* Disable Potentially-Failed feature, the feature is enabled by default
* pf_enable - 0 : disable pf
@@ -97,6 +103,14 @@ struct netns_sctp {
int pf_enable;
/*
+ * Disable Potentially-Failed state exposure, ignored by default
+ * pf_expose - 0 : compatible with old applications (by default)
+ * - 1 : disable pf state exposure
+ * - 2 : enable pf state exposure
+ */
+ int pf_expose;
+
+ /*
 * Policy for performing sctp/socket accounting
* 0 - do socket level accounting, all assocs share sk_sndbuf
* 1 - do sctp accounting, each asoc may use sk_sndbuf bytes
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 2cbcdbdec254..cfbed00ba7ee 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -34,8 +34,18 @@
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>
-#define PP_FLAG_DMA_MAP 1 /* Should page_pool do the DMA map/unmap */
-#define PP_FLAG_ALL PP_FLAG_DMA_MAP
+#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
+ * map/unmap
+ */
+#define PP_FLAG_DMA_SYNC_DEV BIT(1) /* If set all pages that the driver gets
+ * from page_pool will be
+ * DMA-synced-for-device according to
+ * the length provided by the device
+ * driver.
+ * Please note DMA-sync-for-CPU is still
+ * device driver responsibility
+ */
+#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
/*
* Fast allocation side cache array/stack
@@ -65,12 +75,19 @@ struct page_pool_params {
int nid; /* NUMA node id to allocate pages from */
struct device *dev; /* device, for DMA pre-mapping purposes */
enum dma_data_direction dma_dir; /* DMA mapping direction */
+ unsigned int max_len; /* max DMA sync memory size */
+ unsigned int offset; /* DMA addr offset */
};
struct page_pool {
struct page_pool_params p;
- u32 pages_state_hold_cnt;
+ struct delayed_work release_dw;
+ void (*disconnect)(void *);
+ unsigned long defer_start;
+ unsigned long defer_warn;
+
+ u32 pages_state_hold_cnt;
/*
* Data structure for allocation side
@@ -107,6 +124,8 @@ struct page_pool {
 * refcnt exists to simplify drivers' error handling.
*/
refcount_t user_cnt;
+
+ u64 destroy_cnt;
};
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
@@ -129,29 +148,23 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
struct page_pool *page_pool_create(const struct page_pool_params *params);
-void __page_pool_free(struct page_pool *pool);
-static inline void page_pool_free(struct page_pool *pool)
-{
- /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
- * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
- */
#ifdef CONFIG_PAGE_POOL
- __page_pool_free(pool);
-#endif
-}
-
-/* Drivers use this instead of page_pool_free */
+void page_pool_destroy(struct page_pool *pool);
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
+#else
static inline void page_pool_destroy(struct page_pool *pool)
{
- if (!pool)
- return;
+}
- page_pool_free(pool);
+static inline void page_pool_use_xdp_mem(struct page_pool *pool,
+ void (*disconnect)(void *))
+{
}
+#endif
/* Never call this directly, use helpers below */
-void __page_pool_put_page(struct page_pool *pool,
- struct page *page, bool allow_direct);
+void __page_pool_put_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size, bool allow_direct);
static inline void page_pool_put_page(struct page_pool *pool,
struct page *page, bool allow_direct)
@@ -160,32 +173,14 @@ static inline void page_pool_put_page(struct page_pool *pool,
* allow registering MEM_TYPE_PAGE_POOL, but shield linker.
*/
#ifdef CONFIG_PAGE_POOL
- __page_pool_put_page(pool, page, allow_direct);
+ __page_pool_put_page(pool, page, -1, allow_direct);
#endif
}
/* Very limited use-cases allow recycle direct */
static inline void page_pool_recycle_direct(struct page_pool *pool,
struct page *page)
{
- __page_pool_put_page(pool, page, true);
-}
-
-/* API user MUST have disconnected alloc-side (not allowed to call
- * page_pool_alloc_pages()) before calling this. The free-side can
- * still run concurrently, to handle in-flight packet-pages.
- *
- * A request to shutdown can fail (with false) if there are still
- * in-flight packet-pages.
- */
-bool __page_pool_request_shutdown(struct page_pool *pool);
-static inline bool page_pool_request_shutdown(struct page_pool *pool)
-{
- bool safe_to_remove = false;
-
-#ifdef CONFIG_PAGE_POOL
- safe_to_remove = __page_pool_request_shutdown(pool);
-#endif
- return safe_to_remove;
+ __page_pool_put_page(pool, page, -1, true);
}
/* Disconnects a page (from a page_pool). API users can have a need
@@ -216,14 +211,16 @@ static inline bool is_page_pool_compiled_in(void)
#endif
}
-static inline void page_pool_get(struct page_pool *pool)
-{
- refcount_inc(&pool->user_cnt);
-}
-
static inline bool page_pool_put(struct page_pool *pool)
{
return refcount_dec_and_test(&pool->user_cnt);
}
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid);
+static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
+{
+ if (unlikely(pool->p.nid != new_nid))
+ page_pool_update_nid(pool, new_nid);
+}
#endif /* _NET_PAGE_POOL_H */
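page_pool_nid_changed() is the fast-path guard around page_pool_update_nid(); drivers call it from a safe context such as NAPI so recycled pages follow the CPU's NUMA node. A sketch of the intended call site (the ring struct and RX helper are hypothetical):

    static int my_napi_poll(struct napi_struct *napi, int budget)
    {
            struct my_rx_ring *ring = container_of(napi, struct my_rx_ring, napi);

            /* Cheap compare; only takes the update path when the node changed. */
            page_pool_nid_changed(ring->page_pool, numa_mem_id());

            return my_clean_rx_ring(ring, budget);  /* hypothetical RX work */
    }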
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a8b0a9a4c686..144f264ea394 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -15,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
+#include <linux/hashtable.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>
@@ -148,8 +149,8 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
if (qdisc_is_percpu_stats(qdisc))
- return qdisc->empty;
- return !qdisc->q.qlen;
+ return READ_ONCE(qdisc->empty);
+ return !READ_ONCE(qdisc->q.qlen);
}
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
@@ -157,7 +158,7 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
if (qdisc->flags & TCQ_F_NOLOCK) {
if (!spin_trylock(&qdisc->seqlock))
return false;
- qdisc->empty = false;
+ WRITE_ONCE(qdisc->empty, false);
} else if (qdisc_is_running(qdisc)) {
return false;
}
@@ -362,6 +363,7 @@ struct tcf_proto {
bool deleting;
refcount_t refcnt;
struct rcu_head rcu;
+ struct hlist_node destroy_ht_node;
};
struct qdisc_skb_cb {
@@ -414,6 +416,8 @@ struct tcf_block {
struct list_head filter_chain_list;
} chain0;
struct rcu_head rcu;
+ DECLARE_HASHTABLE(proto_destroy_ht, 7);
+ struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};
#ifdef CONFIG_PROVE_LOCKING
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 823afc42a3aa..15b4d9aec7ff 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -286,6 +286,18 @@ enum { SCTP_MAX_GABS = 16 };
* functions simpler to write.
*/
+/* These are the values for pf exposure; UNSET keeps compatibility with old
+ * applications by default.
+ */
+enum {
+ SCTP_PF_EXPOSE_UNSET,
+ SCTP_PF_EXPOSE_DISABLE,
+ SCTP_PF_EXPOSE_ENABLE,
+};
+#define SCTP_PF_EXPOSE_MAX SCTP_PF_EXPOSE_ENABLE
+
+#define SCTP_PS_RETRANS_MAX 0xffff
+
/* These return values describe the success or failure of a number of
* routines which form the lower interface to SCTP_outqueue.
*/
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 503fbc3cd819..3cc913f328cd 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -184,7 +184,8 @@ struct sctp_sock {
__u32 flowlabel;
__u8 dscp;
- int pf_retrans;
+ __u16 pf_retrans;
+ __u16 ps_retrans;
/* The initial Path MTU to use for new associations. */
__u32 pathmtu;
@@ -215,6 +216,7 @@ struct sctp_sock {
__u32 adaptation_ind;
__u32 pd_point;
__u16 nodelay:1,
+ pf_expose:2,
reuse:1,
disable_fragments:1,
v4mapped:1,
@@ -896,7 +898,9 @@ struct sctp_transport {
* and will be initialized from the assocs value. This can be changed
* using the SCTP_PEER_ADDR_THLDS socket option
*/
- int pf_retrans;
+ __u16 pf_retrans;
+ /* Used for primary path switchover. */
+ __u16 ps_retrans;
/* PMTU : The current known path MTU. */
__u32 pathmtu;
@@ -1772,7 +1776,9 @@ struct sctp_association {
* and will be initialized from the assocs value. This can be
* changed using the SCTP_PEER_ADDR_THLDS socket option
*/
- int pf_retrans;
+ __u16 pf_retrans;
+ /* Used for primary path switchover. */
+ __u16 ps_retrans;
/* Maximum number of times the endpoint will retransmit INIT */
__u16 max_init_attempts;
@@ -2053,6 +2059,7 @@ struct sctp_association {
__u8 need_ecne:1, /* Need to send an ECNE Chunk? */
temp:1, /* Is it a temporary association? */
+ pf_expose:2, /* Expose pf state? */
force_delay:1;
__u8 strreset_enable;
diff --git a/include/net/smc.h b/include/net/smc.h
index 05174ae4f325..646feb4bc75f 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -37,6 +37,8 @@ struct smcd_dmb {
#define ISM_EVENT_GID 1
#define ISM_EVENT_SWR 2
+#define ISM_ERROR 0xFFFF
+
struct smcd_event {
u32 type;
u32 code;
@@ -77,6 +79,8 @@ struct smcd_dev {
bool pnetid_by_user;
struct list_head lgr_list;
spinlock_t lgr_lock;
+ atomic_t lgr_cnt;
+ wait_queue_head_t lgrs_deleted;
u8 going_away : 1;
};
diff --git a/include/net/sock.h b/include/net/sock.h
index ac6042d0af32..e7f697174f84 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -859,17 +859,17 @@ static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
static inline void sk_acceptq_removed(struct sock *sk)
{
- sk->sk_ack_backlog--;
+ WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
}
static inline void sk_acceptq_added(struct sock *sk)
{
- sk->sk_ack_backlog++;
+ WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
}
static inline bool sk_acceptq_is_full(const struct sock *sk)
{
- return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
+ return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
}
/*
@@ -899,11 +899,11 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
skb_dst_force(skb);
if (!sk->sk_backlog.tail)
- sk->sk_backlog.head = skb;
+ WRITE_ONCE(sk->sk_backlog.head, skb);
else
sk->sk_backlog.tail->next = skb;
- sk->sk_backlog.tail = skb;
+ WRITE_ONCE(sk->sk_backlog.tail, skb);
skb->next = NULL;
}
@@ -1939,8 +1939,8 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
static inline void sk_dst_confirm(struct sock *sk)
{
- if (!sk->sk_dst_pending_confirm)
- sk->sk_dst_pending_confirm = 1;
+ if (!READ_ONCE(sk->sk_dst_pending_confirm))
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
}
static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
@@ -1950,10 +1950,10 @@ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
- if (sk && sk->sk_dst_pending_confirm)
- sk->sk_dst_pending_confirm = 0;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
+ if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
}
}
@@ -2341,7 +2341,7 @@ static inline ktime_t sock_read_timestamp(struct sock *sk)
return kt;
#else
- return sk->sk_stamp;
+ return READ_ONCE(sk->sk_stamp);
#endif
}
@@ -2352,7 +2352,7 @@ static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
sk->sk_stamp = kt;
write_sequnlock(&sk->sk_stamp_seq);
#else
- sk->sk_stamp = kt;
+ WRITE_ONCE(sk->sk_stamp, kt);
#endif
}
diff --git a/include/net/tcp.h b/include/net/tcp.h
index ab4eb5eb5d07..36f195fb576a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -537,7 +537,7 @@ static inline u32 tcp_cookie_time(void)
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
-u64 cookie_init_timestamp(struct request_sock *req);
+u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
@@ -757,10 +757,16 @@ static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}
+/* Convert a nsec timestamp into a TCP TSval timestamp (currently ms-based) */
+static inline u32 tcp_ns_to_ts(u64 ns)
+{
+ return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+}
+
/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
static inline u32 tcp_time_stamp_raw(void)
{
- return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
+ return tcp_ns_to_ts(tcp_clock_ns());
}
void tcp_mstamp_refresh(struct tcp_sock *tp);
@@ -772,7 +778,7 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
- return div_u64(skb->skb_mstamp_ns, NSEC_PER_SEC / TCP_TS_HZ);
+ return tcp_ns_to_ts(skb->skb_mstamp_ns);
}
/* provide the departure time in us unit */
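Editor's note: with TCP_TS_HZ at its current value of 1000, the new helper reduces to a single divide by one million, i.e. nanoseconds to milliseconds. A standalone sketch, assuming TCP_TS_HZ == 1000 (which is what "ms based currently" refers to):

	#include <stdint.h>

	#define NSEC_PER_SEC	1000000000ULL
	#define TCP_TS_HZ	1000ULL	/* TSval ticks per second */

	/* Standalone version of the helper: one divide maps a
	 * nanosecond clock onto TSval units. */
	static uint32_t tcp_ns_to_ts(uint64_t ns)
	{
		return (uint32_t)(ns / (NSEC_PER_SEC / TCP_TS_HZ));
	}

	/* e.g. tcp_ns_to_ts(2500000000) == 2500: 2.5 s -> 2500 ticks */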
diff --git a/include/net/tls.h b/include/net/tls.h
index 41265e542e71..e6becd6218a3 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -40,6 +40,7 @@
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
+#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
@@ -247,6 +248,10 @@ struct tls_context {
bool in_tcp_sendpages;
bool pending_open_record_frags;
+
+ struct mutex tx_lock; /* protects partially_sent_* fields and
+ * per-type TX fields
+ */
unsigned long flags;
/* cache cold stuff */
diff --git a/include/net/vsock_addr.h b/include/net/vsock_addr.h
index 57d2db5c4bdf..cf8cc140d68d 100644
--- a/include/net/vsock_addr.h
+++ b/include/net/vsock_addr.h
@@ -8,7 +8,7 @@
#ifndef _VSOCK_ADDR_H_
#define _VSOCK_ADDR_H_
-#include <linux/vm_sockets.h>
+#include <uapi/linux/vm_sockets.h>
void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port);
int vsock_addr_validate(const struct sockaddr_vm *addr);
diff --git a/include/net/xdp_priv.h b/include/net/xdp_priv.h
index 6a8cba6ea79a..a9d5b7603b89 100644
--- a/include/net/xdp_priv.h
+++ b/include/net/xdp_priv.h
@@ -12,12 +12,8 @@ struct xdp_mem_allocator {
struct page_pool *page_pool;
struct zero_copy_allocator *zc_alloc;
};
- int disconnect_cnt;
- unsigned long defer_start;
struct rhash_head node;
struct rcu_head rcu;
- struct delayed_work defer_wq;
- unsigned long defer_warn;
};
#endif /* __LINUX_NET_XDP_PRIV_H__ */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index aa08a7a5f6ac..dda3c025452e 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1613,13 +1613,6 @@ static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optv
{
return -ENOPROTOOPT;
}
-
-static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
-{
- /* should not happen */
- kfree_skb(skb);
- return 0;
-}
#endif
struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
new file mode 100644
index 000000000000..a836afe8f68e
--- /dev/null
+++ b/include/soc/mscc/ocelot.h
@@ -0,0 +1,539 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _SOC_MSCC_OCELOT_H
+#define _SOC_MSCC_OCELOT_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/if_vlan.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+
+#define IFH_INJ_BYPASS BIT(31)
+#define IFH_INJ_POP_CNT_DISABLE (3 << 28)
+
+#define IFH_TAG_TYPE_C 0
+#define IFH_TAG_TYPE_S 1
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_DSCP 0x1
+#define IFH_REW_OP_ONE_STEP_PTP 0x2
+#define IFH_REW_OP_TWO_STEP_PTP 0x3
+#define IFH_REW_OP_ORIGIN_PTP 0x5
+
+#define OCELOT_TAG_LEN 16
+#define OCELOT_SHORT_PREFIX_LEN 4
+#define OCELOT_LONG_PREFIX_LEN 16
+
+#define OCELOT_SPEED_2500 0
+#define OCELOT_SPEED_1000 1
+#define OCELOT_SPEED_100 2
+#define OCELOT_SPEED_10 3
+
+#define TARGET_OFFSET 24
+#define REG_MASK GENMASK(TARGET_OFFSET - 1, 0)
+#define REG(reg, offset) [reg & REG_MASK] = offset
+
+#define REG_RESERVED_ADDR 0xffffffff
+#define REG_RESERVED(reg) REG(reg, REG_RESERVED_ADDR)
+
+enum ocelot_target {
+ ANA = 1,
+ QS,
+ QSYS,
+ REW,
+ SYS,
+ S2,
+ HSIO,
+ PTP,
+ GCB,
+ TARGET_MAX,
+};
+
+enum ocelot_reg {
+ ANA_ADVLEARN = ANA << TARGET_OFFSET,
+ ANA_VLANMASK,
+ ANA_PORT_B_DOMAIN,
+ ANA_ANAGEFIL,
+ ANA_ANEVENTS,
+ ANA_STORMLIMIT_BURST,
+ ANA_STORMLIMIT_CFG,
+ ANA_ISOLATED_PORTS,
+ ANA_COMMUNITY_PORTS,
+ ANA_AUTOAGE,
+ ANA_MACTOPTIONS,
+ ANA_LEARNDISC,
+ ANA_AGENCTRL,
+ ANA_MIRRORPORTS,
+ ANA_EMIRRORPORTS,
+ ANA_FLOODING,
+ ANA_FLOODING_IPMC,
+ ANA_SFLOW_CFG,
+ ANA_PORT_MODE,
+ ANA_CUT_THRU_CFG,
+ ANA_PGID_PGID,
+ ANA_TABLES_ANMOVED,
+ ANA_TABLES_MACHDATA,
+ ANA_TABLES_MACLDATA,
+ ANA_TABLES_STREAMDATA,
+ ANA_TABLES_MACACCESS,
+ ANA_TABLES_MACTINDX,
+ ANA_TABLES_VLANACCESS,
+ ANA_TABLES_VLANTIDX,
+ ANA_TABLES_ISDXACCESS,
+ ANA_TABLES_ISDXTIDX,
+ ANA_TABLES_ENTRYLIM,
+ ANA_TABLES_PTP_ID_HIGH,
+ ANA_TABLES_PTP_ID_LOW,
+ ANA_TABLES_STREAMACCESS,
+ ANA_TABLES_STREAMTIDX,
+ ANA_TABLES_SEQ_HISTORY,
+ ANA_TABLES_SEQ_MASK,
+ ANA_TABLES_SFID_MASK,
+ ANA_TABLES_SFIDACCESS,
+ ANA_TABLES_SFIDTIDX,
+ ANA_MSTI_STATE,
+ ANA_OAM_UPM_LM_CNT,
+ ANA_SG_ACCESS_CTRL,
+ ANA_SG_CONFIG_REG_1,
+ ANA_SG_CONFIG_REG_2,
+ ANA_SG_CONFIG_REG_3,
+ ANA_SG_CONFIG_REG_4,
+ ANA_SG_CONFIG_REG_5,
+ ANA_SG_GCL_GS_CONFIG,
+ ANA_SG_GCL_TI_CONFIG,
+ ANA_SG_STATUS_REG_1,
+ ANA_SG_STATUS_REG_2,
+ ANA_SG_STATUS_REG_3,
+ ANA_PORT_VLAN_CFG,
+ ANA_PORT_DROP_CFG,
+ ANA_PORT_QOS_CFG,
+ ANA_PORT_VCAP_CFG,
+ ANA_PORT_VCAP_S1_KEY_CFG,
+ ANA_PORT_VCAP_S2_CFG,
+ ANA_PORT_PCP_DEI_MAP,
+ ANA_PORT_CPU_FWD_CFG,
+ ANA_PORT_CPU_FWD_BPDU_CFG,
+ ANA_PORT_CPU_FWD_GARP_CFG,
+ ANA_PORT_CPU_FWD_CCM_CFG,
+ ANA_PORT_PORT_CFG,
+ ANA_PORT_POL_CFG,
+ ANA_PORT_PTP_CFG,
+ ANA_PORT_PTP_DLY1_CFG,
+ ANA_PORT_PTP_DLY2_CFG,
+ ANA_PORT_SFID_CFG,
+ ANA_PFC_PFC_CFG,
+ ANA_PFC_PFC_TIMER,
+ ANA_IPT_OAM_MEP_CFG,
+ ANA_IPT_IPT,
+ ANA_PPT_PPT,
+ ANA_FID_MAP_FID_MAP,
+ ANA_AGGR_CFG,
+ ANA_CPUQ_CFG,
+ ANA_CPUQ_CFG2,
+ ANA_CPUQ_8021_CFG,
+ ANA_DSCP_CFG,
+ ANA_DSCP_REWR_CFG,
+ ANA_VCAP_RNG_TYPE_CFG,
+ ANA_VCAP_RNG_VAL_CFG,
+ ANA_VRAP_CFG,
+ ANA_VRAP_HDR_DATA,
+ ANA_VRAP_HDR_MASK,
+ ANA_DISCARD_CFG,
+ ANA_FID_CFG,
+ ANA_POL_PIR_CFG,
+ ANA_POL_CIR_CFG,
+ ANA_POL_MODE_CFG,
+ ANA_POL_PIR_STATE,
+ ANA_POL_CIR_STATE,
+ ANA_POL_STATE,
+ ANA_POL_FLOWC,
+ ANA_POL_HYST,
+ ANA_POL_MISC_CFG,
+ QS_XTR_GRP_CFG = QS << TARGET_OFFSET,
+ QS_XTR_RD,
+ QS_XTR_FRM_PRUNING,
+ QS_XTR_FLUSH,
+ QS_XTR_DATA_PRESENT,
+ QS_XTR_CFG,
+ QS_INJ_GRP_CFG,
+ QS_INJ_WR,
+ QS_INJ_CTRL,
+ QS_INJ_STATUS,
+ QS_INJ_ERR,
+ QS_INH_DBG,
+ QSYS_PORT_MODE = QSYS << TARGET_OFFSET,
+ QSYS_SWITCH_PORT_MODE,
+ QSYS_STAT_CNT_CFG,
+ QSYS_EEE_CFG,
+ QSYS_EEE_THRES,
+ QSYS_IGR_NO_SHARING,
+ QSYS_EGR_NO_SHARING,
+ QSYS_SW_STATUS,
+ QSYS_EXT_CPU_CFG,
+ QSYS_PAD_CFG,
+ QSYS_CPU_GROUP_MAP,
+ QSYS_QMAP,
+ QSYS_ISDX_SGRP,
+ QSYS_TIMED_FRAME_ENTRY,
+ QSYS_TFRM_MISC,
+ QSYS_TFRM_PORT_DLY,
+ QSYS_TFRM_TIMER_CFG_1,
+ QSYS_TFRM_TIMER_CFG_2,
+ QSYS_TFRM_TIMER_CFG_3,
+ QSYS_TFRM_TIMER_CFG_4,
+ QSYS_TFRM_TIMER_CFG_5,
+ QSYS_TFRM_TIMER_CFG_6,
+ QSYS_TFRM_TIMER_CFG_7,
+ QSYS_TFRM_TIMER_CFG_8,
+ QSYS_RED_PROFILE,
+ QSYS_RES_QOS_MODE,
+ QSYS_RES_CFG,
+ QSYS_RES_STAT,
+ QSYS_EGR_DROP_MODE,
+ QSYS_EQ_CTRL,
+ QSYS_EVENTS_CORE,
+ QSYS_QMAXSDU_CFG_0,
+ QSYS_QMAXSDU_CFG_1,
+ QSYS_QMAXSDU_CFG_2,
+ QSYS_QMAXSDU_CFG_3,
+ QSYS_QMAXSDU_CFG_4,
+ QSYS_QMAXSDU_CFG_5,
+ QSYS_QMAXSDU_CFG_6,
+ QSYS_QMAXSDU_CFG_7,
+ QSYS_PREEMPTION_CFG,
+ QSYS_CIR_CFG,
+ QSYS_EIR_CFG,
+ QSYS_SE_CFG,
+ QSYS_SE_DWRR_CFG,
+ QSYS_SE_CONNECT,
+ QSYS_SE_DLB_SENSE,
+ QSYS_CIR_STATE,
+ QSYS_EIR_STATE,
+ QSYS_SE_STATE,
+ QSYS_HSCH_MISC_CFG,
+ QSYS_TAG_CONFIG,
+ QSYS_TAS_PARAM_CFG_CTRL,
+ QSYS_PORT_MAX_SDU,
+ QSYS_PARAM_CFG_REG_1,
+ QSYS_PARAM_CFG_REG_2,
+ QSYS_PARAM_CFG_REG_3,
+ QSYS_PARAM_CFG_REG_4,
+ QSYS_PARAM_CFG_REG_5,
+ QSYS_GCL_CFG_REG_1,
+ QSYS_GCL_CFG_REG_2,
+ QSYS_PARAM_STATUS_REG_1,
+ QSYS_PARAM_STATUS_REG_2,
+ QSYS_PARAM_STATUS_REG_3,
+ QSYS_PARAM_STATUS_REG_4,
+ QSYS_PARAM_STATUS_REG_5,
+ QSYS_PARAM_STATUS_REG_6,
+ QSYS_PARAM_STATUS_REG_7,
+ QSYS_PARAM_STATUS_REG_8,
+ QSYS_PARAM_STATUS_REG_9,
+ QSYS_GCL_STATUS_REG_1,
+ QSYS_GCL_STATUS_REG_2,
+ REW_PORT_VLAN_CFG = REW << TARGET_OFFSET,
+ REW_TAG_CFG,
+ REW_PORT_CFG,
+ REW_DSCP_CFG,
+ REW_PCP_DEI_QOS_MAP_CFG,
+ REW_PTP_CFG,
+ REW_PTP_DLY1_CFG,
+ REW_RED_TAG_CFG,
+ REW_DSCP_REMAP_DP1_CFG,
+ REW_DSCP_REMAP_CFG,
+ REW_STAT_CFG,
+ REW_REW_STICKY,
+ REW_PPT,
+ SYS_COUNT_RX_OCTETS = SYS << TARGET_OFFSET,
+ SYS_COUNT_RX_UNICAST,
+ SYS_COUNT_RX_MULTICAST,
+ SYS_COUNT_RX_BROADCAST,
+ SYS_COUNT_RX_SHORTS,
+ SYS_COUNT_RX_FRAGMENTS,
+ SYS_COUNT_RX_JABBERS,
+ SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ SYS_COUNT_RX_SYM_ERRS,
+ SYS_COUNT_RX_64,
+ SYS_COUNT_RX_65_127,
+ SYS_COUNT_RX_128_255,
+ SYS_COUNT_RX_256_1023,
+ SYS_COUNT_RX_1024_1526,
+ SYS_COUNT_RX_1527_MAX,
+ SYS_COUNT_RX_PAUSE,
+ SYS_COUNT_RX_CONTROL,
+ SYS_COUNT_RX_LONGS,
+ SYS_COUNT_RX_CLASSIFIED_DROPS,
+ SYS_COUNT_TX_OCTETS,
+ SYS_COUNT_TX_UNICAST,
+ SYS_COUNT_TX_MULTICAST,
+ SYS_COUNT_TX_BROADCAST,
+ SYS_COUNT_TX_COLLISION,
+ SYS_COUNT_TX_DROPS,
+ SYS_COUNT_TX_PAUSE,
+ SYS_COUNT_TX_64,
+ SYS_COUNT_TX_65_127,
+ SYS_COUNT_TX_128_511,
+ SYS_COUNT_TX_512_1023,
+ SYS_COUNT_TX_1024_1526,
+ SYS_COUNT_TX_1527_MAX,
+ SYS_COUNT_TX_AGING,
+ SYS_RESET_CFG,
+ SYS_SR_ETYPE_CFG,
+ SYS_VLAN_ETYPE_CFG,
+ SYS_PORT_MODE,
+ SYS_FRONT_PORT_MODE,
+ SYS_FRM_AGING,
+ SYS_STAT_CFG,
+ SYS_SW_STATUS,
+ SYS_MISC_CFG,
+ SYS_REW_MAC_HIGH_CFG,
+ SYS_REW_MAC_LOW_CFG,
+ SYS_TIMESTAMP_OFFSET,
+ SYS_CMID,
+ SYS_PAUSE_CFG,
+ SYS_PAUSE_TOT_CFG,
+ SYS_ATOP,
+ SYS_ATOP_TOT_CFG,
+ SYS_MAC_FC_CFG,
+ SYS_MMGT,
+ SYS_MMGT_FAST,
+ SYS_EVENTS_DIF,
+ SYS_EVENTS_CORE,
+ SYS_CNT,
+ SYS_PTP_STATUS,
+ SYS_PTP_TXSTAMP,
+ SYS_PTP_NXT,
+ SYS_PTP_CFG,
+ SYS_RAM_INIT,
+ SYS_CM_ADDR,
+ SYS_CM_DATA_WR,
+ SYS_CM_DATA_RD,
+ SYS_CM_OP,
+ SYS_CM_DATA,
+ S2_CORE_UPDATE_CTRL = S2 << TARGET_OFFSET,
+ S2_CORE_MV_CFG,
+ S2_CACHE_ENTRY_DAT,
+ S2_CACHE_MASK_DAT,
+ S2_CACHE_ACTION_DAT,
+ S2_CACHE_CNT_DAT,
+ S2_CACHE_TG_DAT,
+ PTP_PIN_CFG = PTP << TARGET_OFFSET,
+ PTP_PIN_TOD_SEC_MSB,
+ PTP_PIN_TOD_SEC_LSB,
+ PTP_PIN_TOD_NSEC,
+ PTP_CFG_MISC,
+ PTP_CLK_CFG_ADJ_CFG,
+ PTP_CLK_CFG_ADJ_FREQ,
+ GCB_SOFT_RST = GCB << TARGET_OFFSET,
+};
+
+enum ocelot_regfield {
+ ANA_ADVLEARN_VLAN_CHK,
+ ANA_ADVLEARN_LEARN_MIRROR,
+ ANA_ANEVENTS_FLOOD_DISCARD,
+ ANA_ANEVENTS_MSTI_DROP,
+ ANA_ANEVENTS_ACLKILL,
+ ANA_ANEVENTS_ACLUSED,
+ ANA_ANEVENTS_AUTOAGE,
+ ANA_ANEVENTS_VS2TTL1,
+ ANA_ANEVENTS_STORM_DROP,
+ ANA_ANEVENTS_LEARN_DROP,
+ ANA_ANEVENTS_AGED_ENTRY,
+ ANA_ANEVENTS_CPU_LEARN_FAILED,
+ ANA_ANEVENTS_AUTO_LEARN_FAILED,
+ ANA_ANEVENTS_LEARN_REMOVE,
+ ANA_ANEVENTS_AUTO_LEARNED,
+ ANA_ANEVENTS_AUTO_MOVED,
+ ANA_ANEVENTS_DROPPED,
+ ANA_ANEVENTS_CLASSIFIED_DROP,
+ ANA_ANEVENTS_CLASSIFIED_COPY,
+ ANA_ANEVENTS_VLAN_DISCARD,
+ ANA_ANEVENTS_FWD_DISCARD,
+ ANA_ANEVENTS_MULTICAST_FLOOD,
+ ANA_ANEVENTS_UNICAST_FLOOD,
+ ANA_ANEVENTS_DEST_KNOWN,
+ ANA_ANEVENTS_BUCKET3_MATCH,
+ ANA_ANEVENTS_BUCKET2_MATCH,
+ ANA_ANEVENTS_BUCKET1_MATCH,
+ ANA_ANEVENTS_BUCKET0_MATCH,
+ ANA_ANEVENTS_CPU_OPERATION,
+ ANA_ANEVENTS_DMAC_LOOKUP,
+ ANA_ANEVENTS_SMAC_LOOKUP,
+ ANA_ANEVENTS_SEQ_GEN_ERR_0,
+ ANA_ANEVENTS_SEQ_GEN_ERR_1,
+ ANA_TABLES_MACACCESS_B_DOM,
+ ANA_TABLES_MACTINDX_BUCKET,
+ ANA_TABLES_MACTINDX_M_INDEX,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_VLD,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_FP,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T,
+ SYS_RESET_CFG_CORE_ENA,
+ SYS_RESET_CFG_MEM_ENA,
+ SYS_RESET_CFG_MEM_INIT,
+ GCB_SOFT_RST_SWC_RST,
+ REGFIELD_MAX
+};
+
+enum ocelot_clk_pins {
+ ALT_PPS_PIN = 1,
+ EXT_CLK_PIN,
+ ALT_LDST_PIN,
+ TOD_ACC_PIN
+};
+
+struct ocelot_stat_layout {
+ u32 offset;
+ char name[ETH_GSTRING_LEN];
+};
+
+enum ocelot_tag_prefix {
+ OCELOT_TAG_PREFIX_DISABLED = 0,
+ OCELOT_TAG_PREFIX_NONE,
+ OCELOT_TAG_PREFIX_SHORT,
+ OCELOT_TAG_PREFIX_LONG,
+};
+
+struct ocelot;
+
+struct ocelot_ops {
+ void (*pcs_init)(struct ocelot *ocelot, int port);
+ int (*reset)(struct ocelot *ocelot);
+};
+
+struct ocelot_port {
+ struct ocelot *ocelot;
+
+ void __iomem *regs;
+
+ /* Ingress default VLAN (pvid) */
+ u16 pvid;
+
+ /* Egress default VLAN (vid) */
+ u16 vid;
+
+ u8 ptp_cmd;
+ struct list_head skbs;
+ u8 ts_id;
+};
+
+struct ocelot {
+ struct device *dev;
+
+ const struct ocelot_ops *ops;
+ struct regmap *targets[TARGET_MAX];
+ struct regmap_field *regfields[REGFIELD_MAX];
+ const u32 *const *map;
+ const struct ocelot_stat_layout *stats_layout;
+ unsigned int num_stats;
+
+ int shared_queue_sz;
+
+ struct net_device *hw_bridge_dev;
+ u16 bridge_mask;
+ u16 bridge_fwd_mask;
+
+ struct ocelot_port **ports;
+
+ u8 base_mac[ETH_ALEN];
+
+ /* Keep track of the vlan port masks */
+ u32 vlan_mask[VLAN_N_VID];
+
+ u8 num_phys_ports;
+ u8 num_cpu_ports;
+ u8 cpu;
+
+ u32 *lags;
+
+ struct list_head multicast;
+
+ /* Workqueue to check statistics for overflow with its lock */
+ struct mutex stats_lock;
+ u64 *stats;
+ struct delayed_work stats_work;
+ struct workqueue_struct *stats_queue;
+
+ u8 ptp:1;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_info;
+ struct hwtstamp_config hwtstamp_config;
+ /* Protects the PTP interface state */
+ struct mutex ptp_lock;
+ /* Protects the PTP clock */
+ spinlock_t ptp_clock_lock;
+
+ void (*port_pcs_init)(struct ocelot_port *port);
+};
+
+#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
+#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
+#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0)
+
+#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi))
+#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
+#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0)
+
+#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
+#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
+#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0)
+
+/* I/O */
+u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
+void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
+u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
+void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
+void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
+ u32 offset);
+
+/* Hardware initialization */
+int ocelot_regfields_init(struct ocelot *ocelot,
+ const struct reg_field *const regfields);
+struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res);
+void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction);
+int ocelot_init(struct ocelot *ocelot);
+void ocelot_deinit(struct ocelot *ocelot);
+void ocelot_init_port(struct ocelot *ocelot, int port);
+
+/* DSA callbacks */
+void ocelot_port_enable(struct ocelot *ocelot, int port,
+ struct phy_device *phy);
+void ocelot_port_disable(struct ocelot *ocelot, int port);
+void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data);
+void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data);
+int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset);
+int ocelot_get_ts_info(struct ocelot *ocelot, int port,
+ struct ethtool_ts_info *info);
+void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
+void ocelot_adjust_link(struct ocelot *ocelot, int port,
+ struct phy_device *phydev);
+void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
+ bool vlan_aware);
+void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state);
+int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
+ struct net_device *bridge);
+int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
+ struct net_device *bridge);
+int ocelot_fdb_dump(struct ocelot *ocelot, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ocelot_fdb_add(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid, bool vlan_aware);
+int ocelot_fdb_del(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid);
+int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ bool untagged);
+int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid);
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
+void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts);
+
+#endif
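Editor's note: the enum ocelot_reg above packs a target identifier into bits [31:24] of each register constant (via TARGET_OFFSET) and a per-target index into the low 24 bits, which REG() then maps to a hardware offset. A small sketch of decoding that layout; the helper names are illustrative, not part of the header:

	#include <stdint.h>

	#define TARGET_OFFSET	24
	#define REG_MASK	((1u << TARGET_OFFSET) - 1) /* GENMASK(23, 0) */

	/* Which block (ANA, QSYS, SYS, ...) a register constant names. */
	static inline unsigned int reg_target(uint32_t reg)
	{
		return reg >> TARGET_OFFSET;
	}

	/* Index into that block's offset table, as used by REG(reg, off). */
	static inline unsigned int reg_index(uint32_t reg)
	{
		return reg & REG_MASK;
	}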
diff --git a/drivers/net/ethernet/mscc/ocelot_sys.h b/include/soc/mscc/ocelot_sys.h
index 16f91e172bcb..16f91e172bcb 100644
--- a/drivers/net/ethernet/mscc/ocelot_sys.h
+++ b/include/soc/mscc/ocelot_sys.h
diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h
index 47b5ee880aa9..ad0aa7f31675 100644
--- a/include/trace/events/page_pool.h
+++ b/include/trace/events/page_pool.h
@@ -8,9 +8,10 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+#include <trace/events/mmflags.h>
#include <net/page_pool.h>
-TRACE_EVENT(page_pool_inflight,
+TRACE_EVENT(page_pool_release,
TP_PROTO(const struct page_pool *pool,
s32 inflight, u32 hold, u32 release),
@@ -22,6 +23,7 @@ TRACE_EVENT(page_pool_inflight,
__field(s32, inflight)
__field(u32, hold)
__field(u32, release)
+ __field(u64, cnt)
),
TP_fast_assign(
@@ -29,10 +31,12 @@ TRACE_EVENT(page_pool_inflight,
__entry->inflight = inflight;
__entry->hold = hold;
__entry->release = release;
+ __entry->cnt = pool->destroy_cnt;
),
- TP_printk("page_pool=%p inflight=%d hold=%u release=%u",
- __entry->pool, __entry->inflight, __entry->hold, __entry->release)
+ TP_printk("page_pool=%p inflight=%d hold=%u release=%u cnt=%llu",
+ __entry->pool, __entry->inflight, __entry->hold,
+ __entry->release, __entry->cnt)
);
TRACE_EVENT(page_pool_state_release,
@@ -46,16 +50,18 @@ TRACE_EVENT(page_pool_state_release,
__field(const struct page_pool *, pool)
__field(const struct page *, page)
__field(u32, release)
+ __field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
__entry->page = page;
__entry->release = release;
+ __entry->pfn = page_to_pfn(page);
),
- TP_printk("page_pool=%p page=%p release=%u",
- __entry->pool, __entry->page, __entry->release)
+ TP_printk("page_pool=%p page=%p pfn=%lu release=%u",
+ __entry->pool, __entry->page, __entry->pfn, __entry->release)
);
TRACE_EVENT(page_pool_state_hold,
@@ -69,16 +75,40 @@ TRACE_EVENT(page_pool_state_hold,
__field(const struct page_pool *, pool)
__field(const struct page *, page)
__field(u32, hold)
+ __field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
__entry->page = page;
__entry->hold = hold;
+ __entry->pfn = page_to_pfn(page);
),
- TP_printk("page_pool=%p page=%p hold=%u",
- __entry->pool, __entry->page, __entry->hold)
+ TP_printk("page_pool=%p page=%p pfn=%lu hold=%u",
+ __entry->pool, __entry->page, __entry->pfn, __entry->hold)
+);
+
+TRACE_EVENT(page_pool_update_nid,
+
+ TP_PROTO(const struct page_pool *pool, int new_nid),
+
+ TP_ARGS(pool, new_nid),
+
+ TP_STRUCT__entry(
+ __field(const struct page_pool *, pool)
+ __field(int, pool_nid)
+ __field(int, new_nid)
+ ),
+
+ TP_fast_assign(
+ __entry->pool = pool;
+ __entry->pool_nid = pool->p.nid;
+ __entry->new_nid = new_nid;
+ ),
+
+ TP_printk("page_pool=%p pool_nid=%d new_nid=%d",
+ __entry->pool, __entry->pool_nid, __entry->new_nid)
);
#endif /* _TRACE_PAGE_POOL_H */
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 2bc9960a31aa..cf97f6339acb 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -86,7 +86,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
),
- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s\n",
+ TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
__entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
show_tcp_state_name(__entry->state))
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index c7e3c9c5bad3..a7378bcd9928 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -317,19 +317,15 @@ __MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
TRACE_EVENT(mem_disconnect,
- TP_PROTO(const struct xdp_mem_allocator *xa,
- bool safe_to_remove, bool force),
+ TP_PROTO(const struct xdp_mem_allocator *xa),
- TP_ARGS(xa, safe_to_remove, force),
+ TP_ARGS(xa),
TP_STRUCT__entry(
__field(const struct xdp_mem_allocator *, xa)
__field(u32, mem_id)
__field(u32, mem_type)
__field(const void *, allocator)
- __field(bool, safe_to_remove)
- __field(bool, force)
- __field(int, disconnect_cnt)
),
TP_fast_assign(
@@ -337,19 +333,12 @@ TRACE_EVENT(mem_disconnect,
__entry->mem_id = xa->mem.id;
__entry->mem_type = xa->mem.type;
__entry->allocator = xa->allocator;
- __entry->safe_to_remove = safe_to_remove;
- __entry->force = force;
- __entry->disconnect_cnt = xa->disconnect_cnt;
),
- TP_printk("mem_id=%d mem_type=%s allocator=%p"
- " safe_to_remove=%s force=%s disconnect_cnt=%d",
+ TP_printk("mem_id=%d mem_type=%s allocator=%p",
__entry->mem_id,
__print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
- __entry->allocator,
- __entry->safe_to_remove ? "true" : "false",
- __entry->force ? "true" : "false",
- __entry->disconnect_cnt
+ __entry->allocator
)
);
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 1e988fdeba34..6a6d2c7655ff 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* linux/can.h
*
diff --git a/include/uapi/linux/can/bcm.h b/include/uapi/linux/can/bcm.h
index 0fb328d93148..dd2b925b09ac 100644
--- a/include/uapi/linux/can/bcm.h
+++ b/include/uapi/linux/can/bcm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* linux/can/bcm.h
*
diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h
index bfc4b5d22a5e..34633283de64 100644
--- a/include/uapi/linux/can/error.h
+++ b/include/uapi/linux/can/error.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* linux/can/error.h
*
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index 3aea5388c8e4..c2190bbe21d8 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* linux/can/gw.h
*
diff --git a/include/uapi/linux/can/j1939.h b/include/uapi/linux/can/j1939.h
index c32325342d30..df6e821075c1 100644
--- a/include/uapi/linux/can/j1939.h
+++ b/include/uapi/linux/can/j1939.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* j1939.h
*
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 1bc70d3a4d39..6f598b73839e 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* linux/can/netlink.h
*
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index be3b36e7ff61..6a11d308eb5c 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* linux/can/raw.h
*
diff --git a/include/uapi/linux/can/vxcan.h b/include/uapi/linux/can/vxcan.h
index 066812d118a2..4fa9d8777a07 100644
--- a/include/uapi/linux/can/vxcan.h
+++ b/include/uapi/linux/can/vxcan.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
#ifndef _UAPI_CAN_VXCAN_H
#define _UAPI_CAN_VXCAN_H
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index b558ea88b766..ae37fd4d194a 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -421,10 +421,11 @@ enum devlink_attr {
DEVLINK_ATTR_RELOAD_FAILED, /* u8 0 or 1 */
+ DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS, /* u64 */
+
DEVLINK_ATTR_NETNS_FD, /* u32 */
DEVLINK_ATTR_NETNS_PID, /* u32 */
DEVLINK_ATTR_NETNS_ID, /* u32 */
-
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/gen_stats.h b/include/uapi/linux/gen_stats.h
index 065408e16a80..852f234f1fd6 100644
--- a/include/uapi/linux/gen_stats.h
+++ b/include/uapi/linux/gen_stats.h
@@ -13,6 +13,7 @@ enum {
TCA_STATS_RATE_EST64,
TCA_STATS_PAD,
TCA_STATS_BASIC_HW,
+ TCA_STATS_PKT64,
__TCA_STATS_MAX,
};
#define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
@@ -26,10 +27,6 @@ struct gnet_stats_basic {
__u64 bytes;
__u32 packets;
};
-struct gnet_stats_basic_packed {
- __u64 bytes;
- __u32 packets;
-} __attribute__ ((packed));
/**
* struct gnet_stats_rate_est - rate estimator
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index de696ca12f2c..f6035f737193 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -27,6 +27,7 @@ enum lwtunnel_ip_t {
LWTUNNEL_IP_TOS,
LWTUNNEL_IP_FLAGS,
LWTUNNEL_IP_PAD,
+ LWTUNNEL_IP_OPTS,
__LWTUNNEL_IP_MAX,
};
@@ -41,12 +42,52 @@ enum lwtunnel_ip6_t {
LWTUNNEL_IP6_TC,
LWTUNNEL_IP6_FLAGS,
LWTUNNEL_IP6_PAD,
+ LWTUNNEL_IP6_OPTS,
__LWTUNNEL_IP6_MAX,
};
#define LWTUNNEL_IP6_MAX (__LWTUNNEL_IP6_MAX - 1)
enum {
+ LWTUNNEL_IP_OPTS_UNSPEC,
+ LWTUNNEL_IP_OPTS_GENEVE,
+ LWTUNNEL_IP_OPTS_VXLAN,
+ LWTUNNEL_IP_OPTS_ERSPAN,
+ __LWTUNNEL_IP_OPTS_MAX,
+};
+
+#define LWTUNNEL_IP_OPTS_MAX (__LWTUNNEL_IP_OPTS_MAX - 1)
+
+enum {
+ LWTUNNEL_IP_OPT_GENEVE_UNSPEC,
+ LWTUNNEL_IP_OPT_GENEVE_CLASS,
+ LWTUNNEL_IP_OPT_GENEVE_TYPE,
+ LWTUNNEL_IP_OPT_GENEVE_DATA,
+ __LWTUNNEL_IP_OPT_GENEVE_MAX,
+};
+
+#define LWTUNNEL_IP_OPT_GENEVE_MAX (__LWTUNNEL_IP_OPT_GENEVE_MAX - 1)
+
+enum {
+ LWTUNNEL_IP_OPT_VXLAN_UNSPEC,
+ LWTUNNEL_IP_OPT_VXLAN_GBP,
+ __LWTUNNEL_IP_OPT_VXLAN_MAX,
+};
+
+#define LWTUNNEL_IP_OPT_VXLAN_MAX (__LWTUNNEL_IP_OPT_VXLAN_MAX - 1)
+
+enum {
+ LWTUNNEL_IP_OPT_ERSPAN_UNSPEC,
+ LWTUNNEL_IP_OPT_ERSPAN_VER,
+ LWTUNNEL_IP_OPT_ERSPAN_INDEX,
+ LWTUNNEL_IP_OPT_ERSPAN_DIR,
+ LWTUNNEL_IP_OPT_ERSPAN_HWID,
+ __LWTUNNEL_IP_OPT_ERSPAN_MAX,
+};
+
+#define LWTUNNEL_IP_OPT_ERSPAN_MAX (__LWTUNNEL_IP_OPT_ERSPAN_MAX - 1)
+
+enum {
LWT_BPF_PROG_UNSPEC,
LWT_BPF_PROG_FD,
LWT_BPF_PROG_NAME,
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index eea166c52c36..11a72a938eb1 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -205,6 +205,8 @@ enum ipset_cadt_flags {
IPSET_FLAG_WITH_FORCEADD = (1 << IPSET_FLAG_BIT_WITH_FORCEADD),
IPSET_FLAG_BIT_WITH_SKBINFO = 6,
IPSET_FLAG_WITH_SKBINFO = (1 << IPSET_FLAG_BIT_WITH_SKBINFO),
+ IPSET_FLAG_BIT_IFACE_WILDCARD = 7,
+ IPSET_FLAG_IFACE_WILDCARD = (1 << IPSET_FLAG_BIT_IFACE_WILDCARD),
IPSET_FLAG_CADT_MAX = 15,
};
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 81fed16fe2b2..bb9b049310df 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -1518,6 +1518,7 @@ enum nft_object_attributes {
 * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration (NLA_U32)
* @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32)
* @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64)
+ * @NFTA_FLOWTABLE_FLAGS: flags (NLA_U32)
*/
enum nft_flowtable_attributes {
NFTA_FLOWTABLE_UNSPEC,
@@ -1527,6 +1528,7 @@ enum nft_flowtable_attributes {
NFTA_FLOWTABLE_USE,
NFTA_FLOWTABLE_HANDLE,
NFTA_FLOWTABLE_PAD,
+ NFTA_FLOWTABLE_FLAGS,
__NFTA_FLOWTABLE_MAX
};
#define NFTA_FLOWTABLE_MAX (__NFTA_FLOWTABLE_MAX - 1)
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 64135ab3a7ac..341e0e8cae46 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -249,6 +249,22 @@
*/
/**
+ * DOC: VLAN offload support for setting group keys and binding STAs to VLANs
+ *
+ * By setting the @NL80211_EXT_FEATURE_VLAN_OFFLOAD flag, drivers can indicate they
+ * support offloading VLAN functionality in a manner where the driver exposes a
+ * single netdev that uses VLAN tagged frames and separate VLAN-specific netdevs
+ * can then be added using RTM_NEWLINK/IFLA_VLAN_ID similarly to the Ethernet
+ * case. Frames received from stations that are not assigned to any VLAN are
+ * delivered on the main netdev and frames to such stations can be sent through
+ * that main netdev.
+ *
+ * %NL80211_CMD_NEW_KEY (for group keys), %NL80211_CMD_NEW_STATION, and
+ * %NL80211_CMD_SET_STATION will optionally specify vlan_id using
+ * %NL80211_ATTR_VLAN_ID.
+ */
+
+/**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -2381,6 +2397,9 @@ enum nl80211_commands {
* the allowed channel bandwidth configurations. (u8 attribute)
* Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13.
*
+ * @NL80211_ATTR_VLAN_ID: VLAN ID (1..4094) for the station and VLAN group key
+ * (u16).
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2843,6 +2862,8 @@ enum nl80211_attrs {
NL80211_ATTR_WIPHY_EDMG_CHANNELS,
NL80211_ATTR_WIPHY_EDMG_BW_CONFIG,
+ NL80211_ATTR_VLAN_ID,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -5492,6 +5513,10 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_SAE_OFFLOAD: Device wants to do SAE authentication in
* station mode (SAE password is passed as part of the connect command).
*
+ * @NL80211_EXT_FEATURE_VLAN_OFFLOAD: The driver supports a single netdev
+ * with VLAN tagged frames and separate VLAN-specific netdevs added using
+ * vconfig similarly to the Ethernet case.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -5537,6 +5562,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_EXT_KEY_ID,
NL80211_EXT_FEATURE_STA_TX_PWR,
NL80211_EXT_FEATURE_SAE_OFFLOAD,
+ NL80211_EXT_FEATURE_VLAN_OFFLOAD,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
index e168dc59e9a0..d99b5a772698 100644
--- a/include/uapi/linux/nvme_ioctl.h
+++ b/include/uapi/linux/nvme_ioctl.h
@@ -63,6 +63,7 @@ struct nvme_passthru_cmd64 {
__u32 cdw14;
__u32 cdw15;
__u32 timeout_ms;
+ __u32 rsvd2;
__u64 result;
};
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 1887a451c388..a87b44cd5590 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -173,6 +173,7 @@ enum ovs_packet_cmd {
* @OVS_PACKET_ATTR_LEN: Packet size before truncation.
 * %OVS_PACKET_ATTR_USERSPACE actions specify the maximum received fragment
* size.
+ * @OVS_PACKET_ATTR_HASH: Packet hash info (e.g. hash, sw_hash and l4_hash in skb).
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_PACKET_* commands.
@@ -190,7 +191,8 @@ enum ovs_packet_attr {
OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe,
error logging should be suppressed. */
OVS_PACKET_ATTR_MRU, /* Maximum received IP fragment size. */
- OVS_PACKET_ATTR_LEN, /* Packet size before truncation. */
+ OVS_PACKET_ATTR_LEN, /* Packet size before truncation. */
+ OVS_PACKET_ATTR_HASH, /* Packet hash. */
__OVS_PACKET_ATTR_MAX
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 5011259b8f67..9f1a72876212 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -950,19 +950,25 @@ enum {
TCA_PIE_BETA,
TCA_PIE_ECN,
TCA_PIE_BYTEMODE,
+ TCA_PIE_DQ_RATE_ESTIMATOR,
__TCA_PIE_MAX
};
#define TCA_PIE_MAX (__TCA_PIE_MAX - 1)
struct tc_pie_xstats {
- __u64 prob; /* current probability */
- __u32 delay; /* current delay in ms */
- __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */
- __u32 packets_in; /* total number of packets enqueued */
- __u32 dropped; /* packets dropped due to pie_action */
- __u32 overlimit; /* dropped due to lack of space in queue */
- __u32 maxq; /* maximum queue size */
- __u32 ecn_mark; /* packets marked with ecn*/
+ __u64 prob; /* current probability */
+ __u32 delay; /* current delay in ms */
+ __u32 avg_dq_rate; /* current average dq_rate in
+ * bits/pie_time
+ */
+ __u32 dq_rate_estimating; /* is avg_dq_rate being calculated? */
+ __u32 packets_in; /* total number of packets enqueued */
+ __u32 dropped; /* packets dropped due to pie_action */
+ __u32 overlimit; /* dropped due to lack of space
+ * in queue
+ */
+ __u32 maxq; /* maximum queue size */
+ __u32 ecn_mark; /* packets marked with ecn */
};
/* CBS */
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index 59e89a1bc3bb..9dc9d0079e98 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -31,13 +31,16 @@
#define PTP_ENABLE_FEATURE (1<<0)
#define PTP_RISING_EDGE (1<<1)
#define PTP_FALLING_EDGE (1<<2)
+#define PTP_STRICT_FLAGS (1<<3)
+#define PTP_EXTTS_EDGES (PTP_RISING_EDGE | PTP_FALLING_EDGE)
/*
* flag fields valid for the new PTP_EXTTS_REQUEST2 ioctl.
*/
#define PTP_EXTTS_VALID_FLAGS (PTP_ENABLE_FEATURE | \
PTP_RISING_EDGE | \
- PTP_FALLING_EDGE)
+ PTP_FALLING_EDGE | \
+ PTP_STRICT_FLAGS)
/*
* flag fields valid for the original PTP_EXTTS_REQUEST ioctl.
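Editor's note: a hedged userspace sketch of what the new PTP_STRICT_FLAGS bit is for. Passed via the PTP_EXTTS_REQUEST2 ioctl, it asks the driver to fail the request outright rather than approximate any flag it cannot honour; PTP_EXTTS_EDGES is the shorthand added by this patch. The device path and channel index are assumptions:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	static int request_both_edges(const char *dev)
	{
		struct ptp_extts_request req = {
			.index = 0,		/* external timestamp channel */
			.flags = PTP_ENABLE_FEATURE | PTP_EXTTS_EDGES |
				 PTP_STRICT_FLAGS, /* fail, don't approximate */
		};
		int fd = open(dev, O_RDWR);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, PTP_EXTTS_REQUEST2, &req);
		close(fd);
		return ret;
	}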
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 99335e1f4a27..25b4fa00bad1 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -51,6 +51,10 @@
* sent when the child exits.
* @stack: Specify the location of the stack for the
* child process.
+ * Note, @stack is expected to point to the
+ * lowest address. The stack direction will be
+ * determined by the kernel and set up
+ * appropriately based on @stack_size.
* @stack_size: The size of the stack for the child process.
* @tls: If CLONE_SETTLS is set, the tls descriptor
* is set to tls.
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 6bce7f9837a9..28ad40d9acba 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -105,6 +105,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_DEFAULT_SNDINFO 34
#define SCTP_AUTH_DEACTIVATE_KEY 35
#define SCTP_REUSE_PORT 36
+#define SCTP_PEER_ADDR_THLDS_V2 37
/* Internal Socket Options. Some of the sctp library functions are
* implemented using these socket options.
@@ -137,6 +138,8 @@ typedef __s32 sctp_assoc_t;
#define SCTP_ASCONF_SUPPORTED 128
#define SCTP_AUTH_SUPPORTED 129
#define SCTP_ECN_SUPPORTED 130
+#define SCTP_EXPOSE_POTENTIALLY_FAILED_STATE 131
+#define SCTP_EXPOSE_PF_STATE SCTP_EXPOSE_POTENTIALLY_FAILED_STATE
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
@@ -410,6 +413,8 @@ enum sctp_spc_state {
SCTP_ADDR_ADDED,
SCTP_ADDR_MADE_PRIM,
SCTP_ADDR_CONFIRMED,
+ SCTP_ADDR_POTENTIALLY_FAILED,
+#define SCTP_ADDR_PF SCTP_ADDR_POTENTIALLY_FAILED
};
@@ -933,6 +938,7 @@ struct sctp_paddrinfo {
enum sctp_spinfo_state {
SCTP_INACTIVE,
SCTP_PF,
+#define SCTP_POTENTIALLY_FAILED SCTP_PF
SCTP_ACTIVE,
SCTP_UNCONFIRMED,
SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */
@@ -1082,6 +1088,15 @@ struct sctp_paddrthlds {
__u16 spt_pathpfthld;
};
+/* Use a new structure with spt_pathcpthld, for backward compatibility */
+struct sctp_paddrthlds_v2 {
+ sctp_assoc_t spt_assoc_id;
+ struct sockaddr_storage spt_address;
+ __u16 spt_pathmaxrxt;
+ __u16 spt_pathpfthld;
+ __u16 spt_pathcpthld;
+};
+
/*
* Socket Option for Getting the Association/Stream-Specific PR-SCTP Status
*/
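Editor's note: the new SCTP_PEER_ADDR_THLDS_V2 option above extends the v1 thresholds with spt_pathcpthld, the retransmission count at which a Potentially Failed primary path is switched away from. A hedged userspace sketch; the threshold values are illustrative:

	#include <stdint.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/sctp.h>

	static int set_path_thresholds(int sd, uint16_t pf_thld, uint16_t ps_thld)
	{
		struct sctp_paddrthlds_v2 thlds;

		memset(&thlds, 0, sizeof(thlds));
		thlds.spt_assoc_id = 0;		/* endpoint default */
		thlds.spt_pathmaxrxt = 5;	/* illustrative */
		thlds.spt_pathpfthld = pf_thld;	/* Active -> Potentially Failed */
		thlds.spt_pathcpthld = ps_thld;	/* PF -> switch primary path */

		return setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS_V2,
				  &thlds, sizeof(thlds));
	}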
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index 76421b878767..add01db1daef 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -233,6 +233,27 @@ struct tipc_sioc_nodeid_req {
char node_id[TIPC_NODEID_LEN];
};
+/*
+ * TIPC Crypto, AEAD
+ */
+#define TIPC_AEAD_ALG_NAME (32)
+
+struct tipc_aead_key {
+ char alg_name[TIPC_AEAD_ALG_NAME];
+ unsigned int keylen; /* in bytes */
+ char key[];
+};
+
+#define TIPC_AEAD_KEYLEN_MIN (16 + 4)
+#define TIPC_AEAD_KEYLEN_MAX (32 + 4)
+#define TIPC_AEAD_KEY_SIZE_MAX (sizeof(struct tipc_aead_key) + \
+ TIPC_AEAD_KEYLEN_MAX)
+
+static inline int tipc_aead_key_size(struct tipc_aead_key *key)
+{
+ return sizeof(*key) + key->keylen;
+}
+
/* The macros and functions below are deprecated:
*/
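Editor's note: struct tipc_aead_key above ends in a flexible array member, so allocations must add keylen to the header size, which is exactly what tipc_aead_key_size() computes. A userspace sketch; the struct is mirrored locally for illustration:

	#include <stdlib.h>
	#include <string.h>

	struct aead_key {		/* mirror of struct tipc_aead_key */
		char alg_name[32];
		unsigned int keylen;	/* in bytes */
		char key[];
	};

	static struct aead_key *alloc_aead_key(const char *alg,
					       const void *bytes,
					       unsigned int len)
	{
		struct aead_key *k = calloc(1, sizeof(*k) + len);

		if (!k)
			return NULL;
		strncpy(k->alg_name, alg, sizeof(k->alg_name) - 1);
		k->keylen = len;
		memcpy(k->key, bytes, len);	/* key follows the header */
		return k;
	}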
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index efb958fd167d..6c2194ab745b 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -63,6 +63,8 @@ enum {
TIPC_NL_PEER_REMOVE,
TIPC_NL_BEARER_ADD,
TIPC_NL_UDP_GET_REMOTEIP,
+ TIPC_NL_KEY_SET,
+ TIPC_NL_KEY_FLUSH,
__TIPC_NL_CMD_MAX,
TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1
@@ -160,6 +162,8 @@ enum {
TIPC_NLA_NODE_UNSPEC,
TIPC_NLA_NODE_ADDR, /* u32 */
TIPC_NLA_NODE_UP, /* flag */
+ TIPC_NLA_NODE_ID, /* data */
+ TIPC_NLA_NODE_KEY, /* data */
__TIPC_NLA_NODE_MAX,
TIPC_NLA_NODE_MAX = __TIPC_NLA_NODE_MAX - 1
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 1f31c2f1e6fc..4508d5e0cf69 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -351,12 +351,12 @@ static int audit_get_nd(struct audit_watch *watch, struct path *parent)
struct dentry *d = kern_path_locked(watch->path, parent);
if (IS_ERR(d))
return PTR_ERR(d);
- inode_unlock(d_backing_inode(parent->dentry));
if (d_is_positive(d)) {
/* update watch filter fields */
watch->dev = d->d_sb->s_dev;
watch->ino = d_backing_inode(d)->i_ino;
}
+ inode_unlock(d_backing_inode(parent->dentry));
dput(d);
return 0;
}
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index ddd8addcdb5c..a3eaf08e7dd3 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1311,12 +1311,12 @@ static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
return false;
switch (off) {
- case offsetof(struct bpf_sysctl, write):
+ case bpf_ctx_range(struct bpf_sysctl, write):
if (type != BPF_READ)
return false;
bpf_ctx_record_field_size(info, size_default);
return bpf_ctx_narrow_access_ok(off, size, size_default);
- case offsetof(struct bpf_sysctl, file_pos):
+ case bpf_ctx_range(struct bpf_sysctl, file_pos):
if (type == BPF_READ) {
bpf_ctx_record_field_size(info, size_default);
return bpf_ctx_narrow_access_ok(off, size, size_default);
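Editor's note: the fix above matters for narrow loads: a 1- or 2-byte read at a non-zero offset inside write or file_pos never equals offsetof(), so the old switch arms missed it. bpf_ctx_range() (paraphrased from include/linux/filter.h) expands to a GCC case range spanning every byte of the field:

	/* Matches any byte offset within MEMBER, not just its first byte. */
	#define bpf_ctx_range(TYPE, MEMBER) \
		offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1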
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 080561bb8a4b..ef4242e5d4bc 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2119,11 +2119,12 @@ int cgroup_do_get_tree(struct fs_context *fc)
nsdentry = kernfs_node_dentry(cgrp->kn, sb);
dput(fc->root);
- fc->root = nsdentry;
if (IS_ERR(nsdentry)) {
- ret = PTR_ERR(nsdentry);
deactivate_locked_super(sb);
+ ret = PTR_ERR(nsdentry);
+ nsdentry = NULL;
}
+ fc->root = nsdentry;
}
if (!ctx->kfc.new_sb_created)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index fc28e17940e0..e2cad3ee2ead 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2373,7 +2373,18 @@ void __init boot_cpu_hotplug_init(void)
this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}
-enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
+/*
+ * These are used for a global "mitigations=" cmdline option for toggling
+ * optional CPU mitigations.
+ */
+enum cpu_mitigations {
+ CPU_MITIGATIONS_OFF,
+ CPU_MITIGATIONS_AUTO,
+ CPU_MITIGATIONS_AUTO_NOSMT,
+};
+
+static enum cpu_mitigations cpu_mitigations __ro_after_init =
+ CPU_MITIGATIONS_AUTO;
static int __init mitigations_parse_cmdline(char *arg)
{
@@ -2390,3 +2401,17 @@ static int __init mitigations_parse_cmdline(char *arg)
return 0;
}
early_param("mitigations", mitigations_parse_cmdline);
+
+/* mitigations=off */
+bool cpu_mitigations_off(void)
+{
+ return cpu_mitigations == CPU_MITIGATIONS_OFF;
+}
+EXPORT_SYMBOL_GPL(cpu_mitigations_off);
+
+/* mitigations=auto,nosmt */
+bool cpu_mitigations_auto_nosmt(void)
+{
+ return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
+}
+EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
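Editor's note: a hedged sketch of how an architecture-side consumer of the two helpers exported above might look. The mitigation-specific functions are stand-in stubs so the sketch compiles, not real kernel APIs:

	#include <stdbool.h>

	/* Stand-ins: the real helpers live in kernel/cpu.c and the
	 * actions in arch code. */
	static bool cpu_mitigations_off(void)        { return false; }
	static bool cpu_mitigations_auto_nosmt(void) { return false; }
	static void enable_workaround(void)          { }
	static void forbid_smt(void)                 { }

	static void select_mitigation(void)
	{
		if (cpu_mitigations_off())
			return;			/* "mitigations=off" */

		enable_workaround();		/* "mitigations=auto" */

		if (cpu_mitigations_auto_nosmt())
			forbid_smt();		/* "mitigations=auto,nosmt" */
	}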
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 73c616876597..834640057c93 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1031,7 +1031,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
{
}
-void
+static inline void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}
@@ -10532,6 +10532,15 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
goto err_ns;
}
+ /*
+ * Disallow uncore-cgroup events; they don't make sense, as the cgroup
+ * will be different on other CPUs in the uncore mask.
+ */
+ if (pmu->task_ctx_nr == perf_invalid_context && cgroup_fd != -1) {
+ err = -EINVAL;
+ goto err_pmu;
+ }
+
if (event->attr.aux_output &&
!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) {
err = -EOPNOTSUPP;
@@ -11320,8 +11329,11 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
int err;
/*
- * Get the target context (task or percpu):
+ * Grouping is not supported for kernel events, and neither is 'AUX';
+ * make sure such requests are rejected.
*/
+ if (attr->aux_output)
+ return ERR_PTR(-EINVAL);
event = perf_event_alloc(attr, cpu, task, NULL, NULL,
overflow_handler, context, -1);
@@ -11333,6 +11345,9 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
/* Mark owner so we could distinguish it from user events. */
event->owner = TASK_TOMBSTONE;
+ /*
+ * Get the target context (task or percpu):
+ */
ctx = find_get_context(event->pmu, task, event);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
@@ -11784,7 +11799,7 @@ inherit_event(struct perf_event *parent_event,
GFP_KERNEL);
if (!child_ctx->task_ctx_data) {
free_event(child_event);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
}
@@ -11887,7 +11902,7 @@ static int inherit_group(struct perf_event *parent_event,
if (IS_ERR(child_ctr))
return PTR_ERR(child_ctr);
- if (sub->aux_event == parent_event &&
+ if (sub->aux_event == parent_event && child_ctr &&
!perf_get_aux_event(child_ctr, leader))
return -EINVAL;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index bcdf53125210..55af6931c6ec 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2561,7 +2561,35 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
return 0;
}
-static bool clone3_args_valid(const struct kernel_clone_args *kargs)
+/**
+ * clone3_stack_valid - check and prepare stack
+ * @kargs: kernel clone args
+ *
+ * Verify that the stack arguments userspace gave us are sane.
+ * In addition, set the stack direction for userspace since it's easy for us to
+ * determine.
+ */
+static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
+{
+ if (kargs->stack == 0) {
+ if (kargs->stack_size > 0)
+ return false;
+ } else {
+ if (kargs->stack_size == 0)
+ return false;
+
+ if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
+ return false;
+
+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_IA64)
+ kargs->stack += kargs->stack_size;
+#endif
+ }
+
+ return true;
+}
+
+static bool clone3_args_valid(struct kernel_clone_args *kargs)
{
/*
* All lower bits of the flag word are taken.
@@ -2581,6 +2609,9 @@ static bool clone3_args_valid(const struct kernel_clone_args *kargs)
kargs->exit_signal)
return false;
+ if (!clone3_stack_valid(kargs))
+ return false;
+
return true;
}
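Editor's note: a hedged userspace sketch of the contract clone3_stack_valid() now enforces, matching the uapi comment earlier in this series: the caller passes the lowest stack address plus its size, and the kernel adjusts for stack direction itself. There is no glibc wrapper, so the raw syscall is used; error handling is trimmed:

	#define _GNU_SOURCE
	#include <linux/sched.h>	/* struct clone_args */
	#include <linux/types.h>
	#include <sys/syscall.h>
	#include <sys/mman.h>
	#include <signal.h>
	#include <string.h>
	#include <stdint.h>
	#include <unistd.h>

	static pid_t spawn_child(void)
	{
		const size_t stack_size = 1024 * 1024;
		void *stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK,
				   -1, 0);
		struct clone_args args;

		if (stack == MAP_FAILED)
			return -1;

		memset(&args, 0, sizeof(args));
		args.exit_signal = SIGCHLD;
		args.stack = (__u64)(uintptr_t)stack;	/* lowest address */
		args.stack_size = stack_size;		/* kernel adds it */

		return syscall(__NR_clone3, &args, sizeof(args));
	}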
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 132672b74e4b..dd822fd8a7d5 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
* @type: Type of irqchip_fwnode. See linux/irqdomain.h
* @name: Optional user provided domain name
* @id: Optional user provided id if name != NULL
- * @data: Optional user-provided data
+ * @pa: Optional user-provided physical address
*
 * Allocate a struct irqchip_fwid, and return a pointer to the embedded
* fwnode_handle (or NULL on failure).
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dd05a378631a..0f2eb3629070 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1073,6 +1073,7 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
task_rq_unlock(rq, p, &rf);
}
+#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css,
unsigned int clamps)
@@ -1091,7 +1092,6 @@ uclamp_update_active_tasks(struct cgroup_subsys_state *css,
css_task_iter_end(&it);
}
-#ifdef CONFIG_UCLAMP_TASK_GROUP
static void cpu_util_update_eff(struct cgroup_subsys_state *css);
static void uclamp_update_root_tg(void)
{
@@ -3929,13 +3929,22 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
restart:
+#ifdef CONFIG_SMP
/*
- * Ensure that we put DL/RT tasks before the pick loop, such that they
- * can PULL higher prio tasks when we lower the RQ 'priority'.
+ * We must do the balancing pass before put_next_task(), such
+ * that when we release the rq->lock the task is in the same
+ * state as before we took rq->lock.
+ *
+ * We can terminate the balance pass as soon as we know there is
+ * a runnable task of @class priority or higher.
*/
- prev->sched_class->put_prev_task(rq, prev, rf);
- if (!rq->nr_running)
- newidle_balance(rq, rf);
+ for_class_range(class, prev->sched_class, &idle_sched_class) {
+ if (class->balance(rq, prev, rf))
+ break;
+ }
+#endif
+
+ put_prev_task(rq, prev);
for_each_class(class) {
p = class->pick_next_task(rq, NULL, NULL);
@@ -6201,7 +6210,7 @@ static struct task_struct *__pick_migrate_task(struct rq *rq)
for_each_class(class) {
next = class->pick_next_task(rq, NULL, NULL);
if (next) {
- next->sched_class->put_prev_task(rq, next, NULL);
+ next->sched_class->put_prev_task(rq, next);
return next;
}
}
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 2dc48720f189..a8a08030a8f7 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1691,6 +1691,22 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
resched_curr(rq);
}
+static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+{
+ if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
+ /*
+ * This is OK because current is on_cpu, which avoids it being picked
+ * for load-balance; preemption/IRQs are still disabled, avoiding
+ * further scheduler activity on it; and we have not yet started the
+ * picking loop.
+ */
+ rq_unpin_lock(rq, rf);
+ pull_dl_task(rq);
+ rq_repin_lock(rq, rf);
+ }
+
+ return sched_stop_runnable(rq) || sched_dl_runnable(rq);
+}
#endif /* CONFIG_SMP */
/*
@@ -1758,45 +1774,28 @@ static struct task_struct *
pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
struct sched_dl_entity *dl_se;
+ struct dl_rq *dl_rq = &rq->dl;
struct task_struct *p;
- struct dl_rq *dl_rq;
WARN_ON_ONCE(prev || rf);
- dl_rq = &rq->dl;
-
- if (unlikely(!dl_rq->dl_nr_running))
+ if (!sched_dl_runnable(rq))
return NULL;
dl_se = pick_next_dl_entity(rq, dl_rq);
BUG_ON(!dl_se);
-
p = dl_task_of(dl_se);
-
set_next_task_dl(rq, p);
-
return p;
}
-static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
update_curr_dl(rq);
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
-
- if (rf && !on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
- /*
- * This is OK, because current is on_cpu, which avoids it being
- * picked for load-balance and preemption/IRQs are still
- * disabled avoiding further scheduler activity on it and we've
- * not yet started the picking loop.
- */
- rq_unpin_lock(rq, rf);
- pull_dl_task(rq);
- rq_repin_lock(rq, rf);
- }
}
/*
@@ -2442,6 +2441,7 @@ const struct sched_class dl_sched_class = {
.set_next_task = set_next_task_dl,
#ifdef CONFIG_SMP
+ .balance = balance_dl,
.select_task_rq = select_task_rq_dl,
.migrate_task_rq = migrate_task_rq_dl,
.set_cpus_allowed = set_cpus_allowed_dl,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 682a754ea3e1..22a2fed29054 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6570,6 +6570,15 @@ static void task_dead_fair(struct task_struct *p)
{
remove_entity_load_avg(&p->se);
}
+
+static int
+balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+ if (rq->nr_running)
+ return 1;
+
+ return newidle_balance(rq, rf) != 0;
+}
#endif /* CONFIG_SMP */
static unsigned long wakeup_gran(struct sched_entity *se)
@@ -6746,7 +6755,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
int new_tasks;
again:
- if (!cfs_rq->nr_running)
+ if (!sched_fair_runnable(rq))
goto idle;
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -6884,7 +6893,7 @@ idle:
/*
* Account for a descheduled task:
*/
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
struct sched_entity *se = &prev->se;
struct cfs_rq *cfs_rq;
@@ -10414,11 +10423,11 @@ const struct sched_class fair_sched_class = {
.check_preempt_curr = check_preempt_wakeup,
.pick_next_task = pick_next_task_fair,
-
.put_prev_task = put_prev_task_fair,
.set_next_task = set_next_task_fair,
#ifdef CONFIG_SMP
+ .balance = balance_fair,
.select_task_rq = select_task_rq_fair,
.migrate_task_rq = migrate_task_rq_fair,
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8dad5aa600ea..f65ef1e2f204 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -365,6 +365,12 @@ select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
return task_cpu(p); /* IDLE tasks are never migrated */
}
+
+static int
+balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+ return WARN_ON_ONCE(1);
+}
#endif
/*
@@ -375,7 +381,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
resched_curr(rq);
}
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
@@ -460,6 +466,7 @@ const struct sched_class idle_sched_class = {
.set_next_task = set_next_task_idle,
#ifdef CONFIG_SMP
+ .balance = balance_idle,
.select_task_rq = select_task_rq_idle,
.set_cpus_allowed = set_cpus_allowed_common,
#endif
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ebaa4e619684..9b8adc01be3d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1469,6 +1469,22 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
resched_curr(rq);
}
+static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+{
+ if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
+ /*
+ * This is OK because current is on_cpu, which avoids it being picked
+ * for load-balance; preemption/IRQs are still disabled, avoiding
+ * further scheduler activity on it; and we have not yet started the
+ * picking loop.
+ */
+ rq_unpin_lock(rq, rf);
+ pull_rt_task(rq);
+ rq_repin_lock(rq, rf);
+ }
+
+ return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
+}
#endif /* CONFIG_SMP */
/*
@@ -1552,21 +1568,18 @@ static struct task_struct *
pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
struct task_struct *p;
- struct rt_rq *rt_rq = &rq->rt;
WARN_ON_ONCE(prev || rf);
- if (!rt_rq->rt_queued)
+ if (!sched_rt_runnable(rq))
return NULL;
p = _pick_next_task_rt(rq);
-
set_next_task_rt(rq, p);
-
return p;
}
-static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
update_curr_rt(rq);
@@ -1578,18 +1591,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct rq_fla
*/
if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
-
- if (rf && !on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
- /*
- * This is OK, because current is on_cpu, which avoids it being
- * picked for load-balance and preemption/IRQs are still
- * disabled avoiding further scheduler activity on it and we've
- * not yet started the picking loop.
- */
- rq_unpin_lock(rq, rf);
- pull_rt_task(rq);
- rq_repin_lock(rq, rf);
- }
}
#ifdef CONFIG_SMP
@@ -2366,8 +2367,8 @@ const struct sched_class rt_sched_class = {
.set_next_task = set_next_task_rt,
#ifdef CONFIG_SMP
+ .balance = balance_rt,
.select_task_rq = select_task_rq_rt,
-
.set_cpus_allowed = set_cpus_allowed_common,
.rq_online = rq_online_rt,
.rq_offline = rq_offline_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0db2c1b3361e..c8870c5bd7df 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1727,10 +1727,11 @@ struct sched_class {
struct task_struct * (*pick_next_task)(struct rq *rq,
struct task_struct *prev,
struct rq_flags *rf);
- void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct rq_flags *rf);
+ void (*put_prev_task)(struct rq *rq, struct task_struct *p);
void (*set_next_task)(struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP
+ int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
@@ -1773,7 +1774,7 @@ struct sched_class {
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
WARN_ON_ONCE(rq->curr != prev);
- prev->sched_class->put_prev_task(rq, prev, NULL);
+ prev->sched_class->put_prev_task(rq, prev);
}
static inline void set_next_task(struct rq *rq, struct task_struct *next)
@@ -1787,8 +1788,12 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
#else
#define sched_class_highest (&dl_sched_class)
#endif
+
+#define for_class_range(class, _from, _to) \
+ for (class = (_from); class != (_to); class = class->next)
+
#define for_each_class(class) \
- for (class = sched_class_highest; class; class = class->next)
+ for_class_range(class, sched_class_highest, NULL)
extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
@@ -1796,6 +1801,25 @@ extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;
+static inline bool sched_stop_runnable(struct rq *rq)
+{
+ return rq->stop && task_on_rq_queued(rq->stop);
+}
+
+static inline bool sched_dl_runnable(struct rq *rq)
+{
+ return rq->dl.dl_nr_running > 0;
+}
+
+static inline bool sched_rt_runnable(struct rq *rq)
+{
+ return rq->rt.rt_queued > 0;
+}
+
+static inline bool sched_fair_runnable(struct rq *rq)
+{
+ return rq->cfs.nr_running > 0;
+}
#ifdef CONFIG_SMP
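
The header changes above only declare the pieces; the core pick loop that consumes them lives elsewhere in the series. As a hedged sketch (balance_sketch() is illustrative, not the actual kernel/sched/core.c hunk), the slow path runs a balance pass over for_class_range() before put_prev_task(), stopping at the first class that reports runnable work; the range stops short of the idle class, which is why balance_idle() earlier can simply WARN:

/*
 * Illustrative sketch only; not the actual core.c change. The balance
 * pass must run before put_prev_task(), so that if the rq lock gets
 * dropped, prev is still in the same state as when the lock was taken.
 */
static void balance_sketch(struct rq *rq, struct task_struct *prev,
			   struct rq_flags *rf)
{
	const struct sched_class *class;

	/*
	 * Terminate as soon as a class of prev's priority or higher has
	 * runnable tasks; the range excludes the idle class, leaving
	 * balance_idle() as a WARN-only backstop.
	 */
	for_class_range(class, prev->sched_class, &idle_sched_class) {
		if (class->balance(rq, prev, rf))
			break;
	}
}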
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 7e1cee4e65b2..c0640739e05e 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -15,6 +15,12 @@ select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
{
return task_cpu(p); /* stop tasks never migrate */
}
+
+static int
+balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+ return sched_stop_runnable(rq);
+}
#endif /* CONFIG_SMP */
static void
@@ -31,16 +37,13 @@ static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
static struct task_struct *
pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
- struct task_struct *stop = rq->stop;
-
WARN_ON_ONCE(prev || rf);
- if (!stop || !task_on_rq_queued(stop))
+ if (!sched_stop_runnable(rq))
return NULL;
- set_next_task_stop(rq, stop);
-
- return stop;
+ set_next_task_stop(rq, rq->stop);
+ return rq->stop;
}
static void
@@ -60,7 +63,7 @@ static void yield_task_stop(struct rq *rq)
BUG(); /* the stop task should never yield, it's pointless. */
}
-static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
struct task_struct *curr = rq->curr;
u64 delta_exec;
@@ -129,6 +132,7 @@ const struct sched_class stop_sched_class = {
.set_next_task = set_next_task_stop,
#ifdef CONFIG_SMP
+ .balance = balance_stop,
.select_task_rq = select_task_rq_stop,
.set_cpus_allowed = set_cpus_allowed_common,
#endif
diff --git a/kernel/signal.c b/kernel/signal.c
index c4da1ef56fdf..bcd46f547db3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2205,8 +2205,8 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t
*/
preempt_disable();
read_unlock(&tasklist_lock);
- preempt_enable_no_resched();
cgroup_enter_frozen();
+ preempt_enable_no_resched();
freezable_schedule();
cgroup_leave_frozen(true);
} else {
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index 6d1f68b7e528..c9ea7eb2cb1a 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -141,7 +141,8 @@ unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
struct stacktrace_cookie c = {
.store = store,
.size = size,
- .skip = skipnr + 1,
+ /* skip this function if they are tracing us */
+ .skip = skipnr + !!(current == tsk),
};
if (!try_get_task_stack(tsk))
@@ -298,7 +299,8 @@ unsigned int stack_trace_save_tsk(struct task_struct *task,
struct stack_trace trace = {
.entries = store,
.max_entries = size,
- .skip = skipnr + 1,
+ /* skip this function if they are tracing us */
+ .skip = skipnr + !!(current == task),
};
save_stack_trace_tsk(task, &trace);
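
Both variants of stack_trace_save_tsk() now apply the same rule, which is easy to read backwards, so a hedged usage sketch may help (other_task is a placeholder):

unsigned long entries[32];
unsigned int n;

/* Tracing ourselves: stack_trace_save_tsk()'s own frame is on our
 * stack, so one extra frame is skipped and entries[0] is our caller. */
n = stack_trace_save_tsk(current, entries, ARRAY_SIZE(entries), 0);

/* Tracing another task (other_task is a placeholder): the saving
 * function is not on that task's stack, so nothing extra is skipped. */
n = stack_trace_save_tsk(other_task, entries, ARRAY_SIZE(entries), 0);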
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 65eb796610dc..069ca78fb0bf 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -771,7 +771,7 @@ int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts,
/* fill PPS status fields */
pps_fill_timex(txc);
- txc->time.tv_sec = (time_t)ts->tv_sec;
+ txc->time.tv_sec = ts->tv_sec;
txc->time.tv_usec = ts->tv_nsec;
if (!(time_status & STA_NANO))
txc->time.tv_usec = ts->tv_nsec / NSEC_PER_USEC;
diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
index 4bc37ac3bb05..5ee0f7709410 100644
--- a/kernel/time/vsyscall.c
+++ b/kernel/time/vsyscall.c
@@ -110,8 +110,7 @@ void update_vsyscall(struct timekeeper *tk)
nsec = nsec + tk->wall_to_monotonic.tv_nsec;
vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
- if (__arch_use_vsyscall(vdata))
- update_vdso_data(vdata, tk);
+ update_vdso_data(vdata, tk);
__arch_update_vsyscall(vdata, tk);
@@ -124,10 +123,8 @@ void update_vsyscall_tz(void)
{
struct vdso_data *vdata = __arch_get_k_vdso_data();
- if (__arch_use_vsyscall(vdata)) {
- vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
- vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;
- }
+ vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
+ vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;
__arch_sync_vdso_data(vdata);
}
diff --git a/lib/Kconfig b/lib/Kconfig
index 183f92a297ca..3321d04dfa5a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -447,7 +447,6 @@ config ASSOCIATIVE_ARRAY
config HAS_IOMEM
bool
depends on !NO_IOMEM
- select GENERIC_IO
default y
config HAS_IOPORT_MAP
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 5cff72f18c4a..33ffbf308853 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -106,7 +106,12 @@ retry:
was_locked = 1;
} else {
local_irq_restore(flags);
- cpu_relax();
+ /*
+ * Wait for the lock to be released before retrying
+ * atomic_cmpxchg(), in order to mitigate the thundering-herd
+ * problem.
+ */
+ do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
goto retry;
}
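
The replacement loop is the classic test-and-test-and-set trick: spin on a cheap read until the lock looks free, and only then fall back to the atomic_cmpxchg() retry. A generic hedged sketch of the pattern (lock_sketch() is illustrative; the -1 "free" encoding mirrors dump_lock):

/* Generic sketch of the pattern; -1 means "free", as with dump_lock. */
static void lock_sketch(atomic_t *lock, int me)
{
	while (atomic_cmpxchg(lock, -1, me) != -1) {
		do
			cpu_relax();	/* read-only wait: no exclusive cacheline traffic */
		while (atomic_read(lock) != -1);
	}
}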
diff --git a/lib/idr.c b/lib/idr.c
index 66a374892482..c2cf2c52bbde 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -215,7 +215,7 @@ int idr_for_each(const struct idr *idr,
EXPORT_SYMBOL(idr_for_each);
/**
- * idr_get_next() - Find next populated entry.
+ * idr_get_next_ul() - Find next populated entry.
* @idr: IDR handle.
* @nextid: Pointer to an ID.
*
@@ -224,7 +224,7 @@ EXPORT_SYMBOL(idr_for_each);
* to the ID of the found value. To use in a loop, the value pointed to by
* nextid must be incremented by the user.
*/
-void *idr_get_next(struct idr *idr, int *nextid)
+void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
struct radix_tree_iter iter;
void __rcu **slot;
@@ -245,18 +245,14 @@ void *idr_get_next(struct idr *idr, int *nextid)
}
if (!slot)
return NULL;
- id = iter.index + base;
-
- if (WARN_ON_ONCE(id > INT_MAX))
- return NULL;
- *nextid = id;
+ *nextid = iter.index + base;
return entry;
}
-EXPORT_SYMBOL(idr_get_next);
+EXPORT_SYMBOL(idr_get_next_ul);
/**
- * idr_get_next_ul() - Find next populated entry.
+ * idr_get_next() - Find next populated entry.
* @idr: IDR handle.
* @nextid: Pointer to an ID.
*
@@ -265,22 +261,17 @@ EXPORT_SYMBOL(idr_get_next);
* to the ID of the found value. To use in a loop, the value pointed to by
* nextid must be incremented by the user.
*/
-void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
+void *idr_get_next(struct idr *idr, int *nextid)
{
- struct radix_tree_iter iter;
- void __rcu **slot;
- unsigned long base = idr->idr_base;
unsigned long id = *nextid;
+ void *entry = idr_get_next_ul(idr, &id);
- id = (id < base) ? 0 : id - base;
- slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
- if (!slot)
+ if (WARN_ON_ONCE(id > INT_MAX))
return NULL;
-
- *nextid = iter.index + base;
- return rcu_dereference_raw(*slot);
+ *nextid = id;
+ return entry;
}
-EXPORT_SYMBOL(idr_get_next_ul);
+EXPORT_SYMBOL(idr_get_next);
/**
* idr_replace() - replace pointer for given ID.
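
The kernel-doc for both functions keeps the same caller-side contract ("the value pointed to by nextid must be incremented by the user"); a hedged loop sketch (my_idr and use_entry() are placeholders) makes that explicit:

/* my_idr and use_entry() are placeholders. */
unsigned long id = 0;
void *entry;

while ((entry = idr_get_next_ul(&my_idr, &id)) != NULL) {
	use_entry(id, entry);
	id++;	/* the caller, not the IDR, advances nextid */
}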
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 18c1dfbb1765..c8fa1d274530 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1529,7 +1529,7 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
offset = radix_tree_find_next_bit(node, IDR_FREE,
offset + 1);
start = next_index(start, node, offset);
- if (start > max)
+ if (start > max || start == 0)
return ERR_PTR(-ENOSPC);
while (offset == RADIX_TREE_MAP_SIZE) {
offset = node->offset + 1;
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 9d631a7b6a70..7df4f7f395bf 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1110,6 +1110,28 @@ static noinline void check_find_entry(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_move_tiny(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ rcu_read_unlock();
+ xa_store_index(xa, 0, GFP_KERNEL);
+ rcu_read_lock();
+ xas_set(&xas, 0);
+ XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ xas_set(&xas, 0);
+ XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
+ XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+ rcu_read_unlock();
+ xa_erase_index(xa, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
XA_STATE(xas, xa, 0);
@@ -1217,6 +1239,8 @@ static noinline void check_move(struct xarray *xa)
xa_destroy(xa);
+ check_move_tiny(xa);
+
for (i = 0; i < 16; i++)
check_move_small(xa, 1UL << i);
diff --git a/lib/xarray.c b/lib/xarray.c
index 446b956c9188..1237c213f52b 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -994,6 +994,8 @@ void *__xas_prev(struct xa_state *xas)
if (!xas_frozen(xas->xa_node))
xas->xa_index--;
+ if (!xas->xa_node)
+ return set_bounds(xas);
if (xas_not_node(xas->xa_node))
return xas_load(xas);
@@ -1031,6 +1033,8 @@ void *__xas_next(struct xa_state *xas)
if (!xas_frozen(xas->xa_node))
xas->xa_index++;
+ if (!xas->xa_node)
+ return set_bounds(xas);
if (xas_not_node(xas->xa_node))
return xas_load(xas);
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index 08c3c8049998..156f26fdc4c9 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -1146,6 +1146,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
if (DEC_IS_DYNALLOC(s->dict.mode)) {
if (s->dict.allocated < s->dict.size) {
+ s->dict.allocated = s->dict.size;
vfree(s->dict.buf);
s->dict.buf = vmalloc(s->dict.size);
if (s->dict.buf == NULL) {
diff --git a/mm/debug.c b/mm/debug.c
index 8345bb6e4769..0461df1207cb 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -67,28 +67,31 @@ void __dump_page(struct page *page, const char *reason)
*/
mapcount = PageSlab(page) ? 0 : page_mapcount(page);
- pr_warn("page:%px refcount:%d mapcount:%d mapping:%px index:%#lx",
- page, page_ref_count(page), mapcount,
- page->mapping, page_to_pgoff(page));
if (PageCompound(page))
- pr_cont(" compound_mapcount: %d", compound_mapcount(page));
- pr_cont("\n");
- if (PageAnon(page))
- pr_warn("anon ");
- else if (PageKsm(page))
- pr_warn("ksm ");
+ pr_warn("page:%px refcount:%d mapcount:%d mapping:%px "
+ "index:%#lx compound_mapcount: %d\n",
+ page, page_ref_count(page), mapcount,
+ page->mapping, page_to_pgoff(page),
+ compound_mapcount(page));
+ else
+ pr_warn("page:%px refcount:%d mapcount:%d mapping:%px index:%#lx\n",
+ page, page_ref_count(page), mapcount,
+ page->mapping, page_to_pgoff(page));
+ if (PageKsm(page))
+ pr_warn("ksm flags: %#lx(%pGp)\n", page->flags, &page->flags);
+ else if (PageAnon(page))
+ pr_warn("anon flags: %#lx(%pGp)\n", page->flags, &page->flags);
else if (mapping) {
- pr_warn("%ps ", mapping->a_ops);
if (mapping->host && mapping->host->i_dentry.first) {
struct dentry *dentry;
dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
- pr_warn("name:\"%pd\" ", dentry);
- }
+ pr_warn("%ps name:\"%pd\"\n", mapping->a_ops, dentry);
+ } else
+ pr_warn("%ps\n", mapping->a_ops);
+ pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);
}
BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
- pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);
-
hex_only:
print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
sizeof(unsigned long), page,
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index f1930fa0b445..2ac38bdc18a1 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -196,7 +196,7 @@ int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
again:
rcu_read_lock();
h_cg = hugetlb_cgroup_from_task(current);
- if (!css_tryget_online(&h_cg->css)) {
+ if (!css_tryget(&h_cg->css)) {
rcu_read_unlock();
goto again;
}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 0a1b4b484ac5..a8a57bebb5fa 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1028,12 +1028,13 @@ static void collapse_huge_page(struct mm_struct *mm,
anon_vma_lock_write(vma->anon_vma);
- pte = pte_offset_map(pmd, address);
- pte_ptl = pte_lockptr(mm, pmd);
-
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
address, address + HPAGE_PMD_SIZE);
mmu_notifier_invalidate_range_start(&range);
+
+ pte = pte_offset_map(pmd, address);
+ pte_ptl = pte_lockptr(mm, pmd);
+
pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
/*
* After this gup_fast can't run anymore. This also removes
@@ -1601,17 +1602,6 @@ static void collapse_file(struct mm_struct *mm,
result = SCAN_FAIL;
goto xa_unlocked;
}
- } else if (!PageUptodate(page)) {
- xas_unlock_irq(&xas);
- wait_on_page_locked(page);
- if (!trylock_page(page)) {
- result = SCAN_PAGE_LOCK;
- goto xa_unlocked;
- }
- get_page(page);
- } else if (PageDirty(page)) {
- result = SCAN_FAIL;
- goto xa_locked;
} else if (trylock_page(page)) {
get_page(page);
xas_unlock_irq(&xas);
@@ -1626,7 +1616,12 @@ static void collapse_file(struct mm_struct *mm,
* without racing with truncate.
*/
VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(!PageUptodate(page), page);
+
+ /* make sure the page is up to date */
+ if (unlikely(!PageUptodate(page))) {
+ result = SCAN_FAIL;
+ goto out_unlock;
+ }
/*
* If file was truncated then extended, or hole-punched, before
@@ -1642,6 +1637,16 @@ static void collapse_file(struct mm_struct *mm,
goto out_unlock;
}
+ if (!is_shmem && PageDirty(page)) {
+ /*
+ * khugepaged only works on a read-only fd, so this
+ * page is dirty because it hasn't been flushed
+ * since first write.
+ */
+ result = SCAN_FAIL;
+ goto out_unlock;
+ }
+
if (isolate_lru_page(page)) {
result = SCAN_DEL_PAGE_LRU;
goto out_unlock;
diff --git a/mm/madvise.c b/mm/madvise.c
index 2be9f3fdb05e..94c343b4c968 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -363,8 +363,12 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
ClearPageReferenced(page);
test_and_clear_page_young(page);
if (pageout) {
- if (!isolate_lru_page(page))
- list_add(&page->lru, &page_list);
+ if (!isolate_lru_page(page)) {
+ if (PageUnevictable(page))
+ putback_lru_page(page);
+ else
+ list_add(&page->lru, &page_list);
+ }
} else
deactivate_page(page);
huge_unlock:
@@ -441,8 +445,12 @@ regular_page:
ClearPageReferenced(page);
test_and_clear_page_young(page);
if (pageout) {
- if (!isolate_lru_page(page))
- list_add(&page->lru, &page_list);
+ if (!isolate_lru_page(page)) {
+ if (PageUnevictable(page))
+ putback_lru_page(page);
+ else
+ list_add(&page->lru, &page_list);
+ }
} else
deactivate_page(page);
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 363106578876..46ad252e6d6a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -484,7 +484,7 @@ ino_t page_cgroup_ino(struct page *page)
unsigned long ino = 0;
rcu_read_lock();
- if (PageHead(page) && PageSlab(page))
+ if (PageSlab(page) && !PageTail(page))
memcg = memcg_from_slab_page(page);
else
memcg = READ_ONCE(page->mem_cgroup);
@@ -960,7 +960,7 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
if (unlikely(!memcg))
memcg = root_mem_cgroup;
}
- } while (!css_tryget_online(&memcg->css));
+ } while (!css_tryget(&memcg->css));
rcu_read_unlock();
return memcg;
}
@@ -2535,6 +2535,15 @@ retry:
}
/*
+ * Memcg doesn't have a dedicated reserve for atomic
+ * allocations. But like the global atomic pool, we need to
+ * put the burden of reclaim on regular allocation requests
+ * and let these go through as privileged allocations.
+ */
+ if (gfp_mask & __GFP_ATOMIC)
+ goto force;
+
+ /*
* Unlike in global OOM situations, memcg is not in a physical
* memory shortage. Allow dying and OOM-killed tasks to
* bypass the last charges so that they can exit quickly and
@@ -5014,12 +5023,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
int node;
- /*
- * Flush percpu vmstats and vmevents to guarantee the value correctness
- * on parent's and all ancestor levels.
- */
- memcg_flush_percpu_vmstats(memcg, false);
- memcg_flush_percpu_vmevents(memcg);
for_each_node(node)
free_mem_cgroup_per_node_info(memcg, node);
free_percpu(memcg->vmstats_percpu);
@@ -5030,6 +5033,12 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
static void mem_cgroup_free(struct mem_cgroup *memcg)
{
memcg_wb_domain_exit(memcg);
+ /*
+ * Flush percpu vmstats and vmevents to guarantee the value correctness
+ * on parent's and all ancestor levels.
+ */
+ memcg_flush_percpu_vmstats(memcg, false);
+ memcg_flush_percpu_vmevents(memcg);
__mem_cgroup_free(memcg);
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index df570e5c71cc..3b62a9ff8ea0 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -447,6 +447,14 @@ static void update_pgdat_span(struct pglist_data *pgdat)
zone->spanned_pages;
/* No need to lock the zones, they can't change. */
+ if (!zone->spanned_pages)
+ continue;
+ if (!node_end_pfn) {
+ node_start_pfn = zone->zone_start_pfn;
+ node_end_pfn = zone_end_pfn;
+ continue;
+ }
+
if (zone_end_pfn > node_end_pfn)
node_end_pfn = zone_end_pfn;
if (zone->zone_start_pfn < node_start_pfn)
@@ -1638,6 +1646,18 @@ static int check_cpu_on_node(pg_data_t *pgdat)
return 0;
}
+static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg)
+{
+ int nid = *(int *)arg;
+
+ /*
+ * If a memory block belongs to multiple nodes, the stored nid is not
+ * reliable. However, such blocks are always online (e.g., cannot get
+ * offlined) and, therefore, are still spanned by the node.
+ */
+ return mem->nid == nid ? -EEXIST : 0;
+}
+
/**
* try_offline_node
* @nid: the node ID
@@ -1650,25 +1670,24 @@ static int check_cpu_on_node(pg_data_t *pgdat)
void try_offline_node(int nid)
{
pg_data_t *pgdat = NODE_DATA(nid);
- unsigned long start_pfn = pgdat->node_start_pfn;
- unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
- unsigned long pfn;
-
- for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
- unsigned long section_nr = pfn_to_section_nr(pfn);
-
- if (!present_section_nr(section_nr))
- continue;
+ int rc;
- if (pfn_to_nid(pfn) != nid)
- continue;
+ /*
+ * If the node still spans pages (especially ZONE_DEVICE), don't
+ * offline it. A node spans memory after move_pfn_range_to_zone(),
+ * e.g., after the memory block was onlined.
+ */
+ if (pgdat->node_spanned_pages)
+ return;
- /*
- * some memory sections of this node are not removed, and we
- * can't offline node now.
- */
+ /*
+ * Especially offline memory blocks might not be spanned by the
+ * node. They will get spanned by the node once they get onlined.
+ * However, they link to the node in sysfs and can get onlined later.
+ */
+ rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
+ if (rc)
return;
- }
if (check_cpu_on_node(pgdat))
return;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4ae967bcf954..e08c94170ae4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -672,7 +672,9 @@ static const struct mm_walk_ops queue_pages_walk_ops = {
* 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
* specified.
* 0 - pages queued successfully or no misplaced page.
- * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
+ * errno - e.g. misplaced pages with MPOL_MF_STRICT specified (-EIO), or
+ * the memory range specified by nodemask and maxnode pointing
+ * outside your accessible address space (-EFAULT)
*/
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -1286,7 +1288,7 @@ static long do_mbind(unsigned long start, unsigned long len,
flags | MPOL_MF_INVERT, &pagelist);
if (ret < 0) {
- err = -EIO;
+ err = ret;
goto up_out;
}
@@ -1305,10 +1307,12 @@ static long do_mbind(unsigned long start, unsigned long len,
if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
err = -EIO;
- } else
- putback_movable_pages(&pagelist);
-
+ } else {
up_out:
+ if (!list_empty(&pagelist))
+ putback_movable_pages(&pagelist);
+ }
+
up_write(&mm->mmap_sem);
mpol_out:
mpol_put(new);
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 7fde88695f35..9a889e456168 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -180,7 +180,7 @@ int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
mn->ops->invalidate_range_start, _ret,
!mmu_notifier_range_blockable(range) ? "non-" : "");
WARN_ON(mmu_notifier_range_blockable(range) ||
- ret != -EAGAIN);
+ _ret != -EAGAIN);
ret = _ret;
}
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ecc3dbad606b..f391c0c4ed1d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1948,6 +1948,14 @@ void __init page_alloc_init_late(void)
wait_for_completion(&pgdat_init_all_done_comp);
/*
+ * The number of managed pages has changed due to the initialisation
+ * so the pcpu batch and high limits needs to be updated or the limits
+ * will be artificially small.
+ */
+ for_each_populated_zone(zone)
+ zone_pcp_update(zone);
+
+ /*
* We initialized the rest of the deferred pages. Permanently disable
* on-demand struct page initialization.
*/
@@ -3720,10 +3728,6 @@ try_this_zone:
static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
{
unsigned int filter = SHOW_MEM_FILTER_NODES;
- static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
-
- if (!__ratelimit(&show_mem_rs))
- return;
/*
* This documents exceptions given to allocations in certain
@@ -3744,8 +3748,7 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
- DEFAULT_RATELIMIT_BURST);
+ static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
return;
@@ -8514,7 +8517,6 @@ void free_contig_range(unsigned long pfn, unsigned int nr_pages)
WARN(count != 0, "%d pages are still in use!\n", count);
}
-#ifdef CONFIG_MEMORY_HOTPLUG
/*
* The zone indicated has a new number of managed_pages; batch sizes and percpu
* page high values need to be recalculated.
@@ -8528,7 +8530,6 @@ void __meminit zone_pcp_update(struct zone *zone)
per_cpu_ptr(zone->pageset, cpu));
mutex_unlock(&pcp_batch_high_lock);
}
-#endif
void zone_pcp_reset(struct zone *zone)
{
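
The warn_alloc() ratelimit swap above is easy to misread: DEFAULT_RATELIMIT_INTERVAL/BURST are 5*HZ and 10, so the new DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1) tightens output from up to ten warnings per five seconds to one per ten seconds. A minimal hedged sketch of the mechanism (warn_sketch() and example_rs are placeholders):

static void warn_sketch(void)
{
	/* example_rs is a placeholder; the parameters mirror nopage_rs. */
	static DEFINE_RATELIMIT_STATE(example_rs, 10 * HZ, 1);

	if (!__ratelimit(&example_rs))
		return;		/* suppressed: burst of 1 per 10s window */
	pr_warn("example: allocation failure\n");
}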
diff --git a/mm/page_io.c b/mm/page_io.c
index 24ee600f9131..60a66a58b9bf 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -73,6 +73,7 @@ static void swap_slot_free_notify(struct page *page)
{
struct swap_info_struct *sis;
struct gendisk *disk;
+ swp_entry_t entry;
/*
* There is no guarantee that the page is in swap cache - the software
@@ -104,11 +105,10 @@ static void swap_slot_free_notify(struct page *page)
* we again wish to reclaim it.
*/
disk = sis->bdev->bd_disk;
- if (disk->fops->swap_slot_free_notify) {
- swp_entry_t entry;
+ entry.val = page_private(page);
+ if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
unsigned long offset;
- entry.val = page_private(page);
offset = swp_offset(entry);
SetPageDirty(page);
diff --git a/mm/slab.h b/mm/slab.h
index 68e455f2b698..b2b01694dc43 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -323,8 +323,8 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
* Expects a pointer to a slab page. Please note, that PageSlab() check
* isn't sufficient, as it returns true also for tail compound slab pages,
* which do not have slab_cache pointer set.
- * So this function assumes that the page can pass PageHead() and PageSlab()
- * checks.
+ * So this function assumes that the page passes the PageSlab() && !PageTail()
+ * check.
*
* The kmem_cache can be reparented asynchronously. The caller must ensure
* the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
diff --git a/mm/slub.c b/mm/slub.c
index b25c807a111f..e72e802fc569 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1433,12 +1433,15 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
void *old_tail = *tail ? *tail : *head;
int rsize;
- if (slab_want_init_on_free(s)) {
- void *p = NULL;
+ /* Head and tail of the reconstructed freelist */
+ *head = NULL;
+ *tail = NULL;
- do {
- object = next;
- next = get_freepointer(s, object);
+ do {
+ object = next;
+ next = get_freepointer(s, object);
+
+ if (slab_want_init_on_free(s)) {
/*
* Clear the object and the metadata, but don't touch
* the redzone.
@@ -1448,29 +1451,8 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
: 0;
memset((char *)object + s->inuse, 0,
s->size - s->inuse - rsize);
- set_freepointer(s, object, p);
- p = object;
- } while (object != old_tail);
- }
-
-/*
- * Compiler cannot detect this function can be removed if slab_free_hook()
- * evaluates to nothing. Thus, catch all relevant config debug options here.
- */
-#if defined(CONFIG_LOCKDEP) || \
- defined(CONFIG_DEBUG_KMEMLEAK) || \
- defined(CONFIG_DEBUG_OBJECTS_FREE) || \
- defined(CONFIG_KASAN)
- next = *head;
-
- /* Head and tail of the reconstructed freelist */
- *head = NULL;
- *tail = NULL;
-
- do {
- object = next;
- next = get_freepointer(s, object);
+ }
/* If object's reuse doesn't have to be delayed */
if (!slab_free_hook(s, object)) {
/* Move object to the new freelist */
@@ -1485,9 +1467,6 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
*tail = NULL;
return *head != NULL;
-#else
- return true;
-#endif
}
static void *setup_object(struct kmem_cache *s, struct page *page,
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6afc892a148a..a8222041bd44 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1383,12 +1383,29 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
unsigned long freecount = 0;
struct free_area *area;
struct list_head *curr;
+ bool overflow = false;
area = &(zone->free_area[order]);
- list_for_each(curr, &area->free_list[mtype])
- freecount++;
- seq_printf(m, "%6lu ", freecount);
+ list_for_each(curr, &area->free_list[mtype]) {
+ /*
+ * Cap the free_list iteration because it might
+ * be really large and we are under a spinlock,
+ * so a long time spent here could trigger the
+ * hard lockup detector. Anyway, this is a
+ * debugging tool, so knowing there are a handful
+ * of pages of this order should be more than
+ * sufficient.
+ */
+ if (++freecount >= 100000) {
+ overflow = true;
+ break;
+ }
+ }
+ seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
+ spin_unlock_irq(&zone->lock);
+ cond_resched();
+ spin_lock_irq(&zone->lock);
}
seq_putc(m, '\n');
}
@@ -1972,7 +1989,7 @@ void __init init_mm_internals(void)
#endif
#ifdef CONFIG_PROC_FS
proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
- proc_create_seq("pagetypeinfo", 0444, NULL, &pagetypeinfo_op);
+ proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
#endif
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 6c11cdf4dd4c..fbd0c5e7b299 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -109,7 +109,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
dev_kfree_skb(skb);
goto as_indicate_complete;
}
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
skb_queue_tail(&sk->sk_receive_queue, skb);
pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
sk->sk_state_change(sk);
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 908cbb8654f5..ba144d035e3d 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -381,7 +381,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags,
msg->pvc.sap_addr.vpi,
msg->pvc.sap_addr.vci);
dev_kfree_skb(skb);
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
if (error) {
sigd_enq2(NULL, as_reject, old_vcc, NULL, NULL,
&old_vcc->qos, error);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index bb222b882b67..324306d6fde0 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1384,7 +1384,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
/* Now attach up the new socket */
kfree_skb(skb);
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
newsock->state = SS_CONNECTED;
out:
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index dcdbaeeb2358..cd6afe895db9 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -356,7 +356,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
make->sk_state = TCP_ESTABLISHED;
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
bh_unlock_sock(sk);
} else {
if (!mine)
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 64054edc2e3c..4ff6cf1ecae7 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -1085,7 +1085,6 @@ void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface)
hard_iface->bat_v.aggr_len = 0;
skb_queue_head_init(&hard_iface->bat_v.aggr_list);
- spin_lock_init(&hard_iface->bat_v.aggr_list_lock);
INIT_DELAYED_WORK(&hard_iface->bat_v.aggr_wq,
batadv_v_ogm_aggr_work);
}
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 8033f24f506c..714ce56cfcc8 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -152,7 +152,7 @@ static unsigned int batadv_v_ogm_len(struct sk_buff *skb)
* @skb: the OGM to check
* @hard_iface: the interface to use to send the OGM
*
- * Caller needs to hold the hard_iface->bat_v.aggr_list_lock.
+ * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
*
* Return: True, if the given OGMv2 packet still fits, false otherwise.
*/
@@ -163,7 +163,7 @@ static bool batadv_v_ogm_queue_left(struct sk_buff *skb,
BATADV_MAX_AGGREGATION_BYTES);
unsigned int ogm_len = batadv_v_ogm_len(skb);
- lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock);
+ lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
return hard_iface->bat_v.aggr_len + ogm_len <= max;
}
@@ -174,17 +174,13 @@ static bool batadv_v_ogm_queue_left(struct sk_buff *skb,
*
* Empties the OGMv2 aggregation queue and frees all the skbs it contained.
*
- * Caller needs to hold the hard_iface->bat_v.aggr_list_lock.
+ * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
*/
static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface)
{
- struct sk_buff *skb;
-
- lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock);
-
- while ((skb = skb_dequeue(&hard_iface->bat_v.aggr_list)))
- kfree_skb(skb);
+ lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
+ __skb_queue_purge(&hard_iface->bat_v.aggr_list);
hard_iface->bat_v.aggr_len = 0;
}
@@ -197,7 +193,7 @@ static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface)
*
* The aggregation queue is empty after this call.
*
- * Caller needs to hold the hard_iface->bat_v.aggr_list_lock.
+ * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
*/
static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
{
@@ -206,7 +202,7 @@ static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
unsigned int ogm_len;
struct sk_buff *skb;
- lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock);
+ lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
if (!aggr_len)
return;
@@ -220,7 +216,7 @@ static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
skb_reserve(skb_aggr, ETH_HLEN + NET_IP_ALIGN);
skb_reset_network_header(skb_aggr);
- while ((skb = skb_dequeue(&hard_iface->bat_v.aggr_list))) {
+ while ((skb = __skb_dequeue(&hard_iface->bat_v.aggr_list))) {
hard_iface->bat_v.aggr_len -= batadv_v_ogm_len(skb);
ogm_len = batadv_v_ogm_len(skb);
@@ -247,13 +243,13 @@ static void batadv_v_ogm_queue_on_if(struct sk_buff *skb,
return;
}
- spin_lock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
if (!batadv_v_ogm_queue_left(skb, hard_iface))
batadv_v_ogm_aggr_send(hard_iface);
hard_iface->bat_v.aggr_len += batadv_v_ogm_len(skb);
- skb_queue_tail(&hard_iface->bat_v.aggr_list, skb);
- spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock);
+ __skb_queue_tail(&hard_iface->bat_v.aggr_list, skb);
+ spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
}
/**
@@ -392,9 +388,9 @@ void batadv_v_ogm_aggr_work(struct work_struct *work)
batv = container_of(work, struct batadv_hard_iface_bat_v, aggr_wq.work);
hard_iface = container_of(batv, struct batadv_hard_iface, bat_v);
- spin_lock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_aggr_send(hard_iface);
- spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_start_queue_timer(hard_iface);
}
@@ -425,9 +421,9 @@ void batadv_v_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
{
cancel_delayed_work_sync(&hard_iface->bat_v.aggr_wq);
- spin_lock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_aggr_list_free(hard_iface);
- spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
}
/**
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 6967f2e4c3f4..c7b340ddd0e7 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -13,7 +13,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2019.4"
+#define BATADV_SOURCE_VERSION "2019.5"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 1d5bdf3a4b65..f9ec8e7507b6 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1421,7 +1421,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
if (*orig)
return BATADV_FORW_SINGLE;
- /* fall through */
+ fallthrough;
case 0:
return BATADV_FORW_NONE;
default:
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 5ee8e9a100f9..832e156c519e 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
-#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/percpu.h>
@@ -230,7 +229,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
break;
}
- /* fall through */
+ fallthrough;
case ETH_P_BATMAN:
goto dropped;
}
@@ -455,7 +454,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN))
break;
- /* fall through */
+ fallthrough;
case ETH_P_BATMAN:
goto dropped;
}
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 4d7f1baee7b7..47718a82eaf2 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -130,9 +130,6 @@ struct batadv_hard_iface_bat_v {
/** @aggr_len: size of the OGM aggregate (excluding ethernet header) */
unsigned int aggr_len;
- /** @aggr_list_lock: protects aggr_list */
- spinlock_t aggr_list_lock;
-
/**
* @throughput_override: throughput override to disable link
* auto-detection
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 5f508c50649d..3fd124927d4d 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -173,7 +173,7 @@ void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
else
release_sock(sk);
- parent->sk_ack_backlog++;
+ sk_acceptq_added(parent);
}
EXPORT_SYMBOL(bt_accept_enqueue);
@@ -185,7 +185,7 @@ void bt_accept_unlink(struct sock *sk)
BT_DBG("sk %p state %d", sk, sk->sk_state);
list_del_init(&bt_sk(sk)->accept_q);
- bt_sk(sk)->parent->sk_ack_backlog--;
+ sk_acceptq_removed(bt_sk(sk)->parent);
bt_sk(sk)->parent = NULL;
sock_put(sk);
}
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 7f6a581b5b7e..2a1b64dbf76e 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -904,9 +904,9 @@ static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
struct adv_info *adv_instance;
- /* Ignore instance 0 */
+ /* Instance 0x00 always sets the local name */
if (instance == 0x00)
- return 0;
+ return 1;
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
@@ -923,9 +923,9 @@ static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
u8 instance = hdev->cur_adv_instance;
struct adv_info *adv_instance;
- /* Ignore instance 0 */
+ /* Instance 0x00 always sets the local name */
if (instance == 0x00)
- return 0;
+ return 1;
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
@@ -1273,6 +1273,14 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
instance_flags = get_adv_instance_flags(hdev, instance);
+ /* If the instance already has the flags set, skip adding them
+ * again.
+ */
+ if (adv_instance && eir_get_data(adv_instance->adv_data,
+ adv_instance->adv_data_len, EIR_FLAGS,
+ NULL))
+ goto skip_flags;
+
/* The Add Advertising command allows userspace to set both the general
* and limited discoverable flags.
*/
@@ -1305,6 +1313,7 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
}
}
+skip_flags:
if (adv_instance) {
memcpy(ptr, adv_instance->adv_data,
adv_instance->adv_data_len);
@@ -1690,7 +1699,7 @@ int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
* scheduling it.
*/
if (adv_instance && adv_instance->duration) {
- u16 duration = adv_instance->duration * MSEC_PER_SEC;
+ u16 duration = adv_instance->timeout * MSEC_PER_SEC;
/* Time = N * 10 ms */
adv_set->duration = cpu_to_le16(duration / 10);
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index e804a3016902..434effde02c3 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -263,6 +263,37 @@ static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
+static int br_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct net_bridge *br = netdev_priv(dev);
+ struct net_bridge_port *p;
+
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.port = PORT_OTHER;
+ cmd->base.speed = SPEED_UNKNOWN;
+
+ list_for_each_entry(p, &br->port_list, list) {
+ struct ethtool_link_ksettings ecmd;
+ struct net_device *pdev = p->dev;
+
+ if (!netif_running(pdev) || !netif_oper_up(pdev))
+ continue;
+
+ if (__ethtool_get_link_ksettings(pdev, &ecmd))
+ continue;
+
+ if (ecmd.base.speed == (__u32)SPEED_UNKNOWN)
+ continue;
+
+ if (cmd->base.speed == (__u32)SPEED_UNKNOWN ||
+ cmd->base.speed < ecmd.base.speed)
+ cmd->base.speed = ecmd.base.speed;
+ }
+
+ return 0;
+}
+
static netdev_features_t br_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -365,8 +396,9 @@ static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
}
static const struct ethtool_ops br_ethtool_ops = {
- .get_drvinfo = br_getinfo,
- .get_link = ethtool_op_get_link,
+ .get_drvinfo = br_getinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = br_get_link_ksettings,
};
static const struct net_device_ops br_netdev_ops = {
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 284b3662d234..4877a0db16c6 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -566,11 +566,6 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
if (hold_time(br) == 0)
return;
- /* ignore packets unless we are using this port */
- if (!(source->state == BR_STATE_LEARNING ||
- source->state == BR_STATE_FORWARDING))
- return;
-
fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
if (likely(fdb)) {
/* attempt to update an entry for a local interface */
@@ -886,6 +881,9 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
br->dev->name);
return -EINVAL;
}
+ if (!nbp_state_should_learn(p))
+ return 0;
+
local_bh_disable();
rcu_read_lock();
br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index f37b05090f45..8944ceb47fe9 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -182,6 +182,7 @@ static void __br_handle_local_finish(struct sk_buff *skb)
/* check if vlan is allowed, to avoid spoofing */
if ((p->flags & BR_LEARNING) &&
+ nbp_state_should_learn(p) &&
!br_opt_get(p->br, BROPT_NO_LL_LEARN) &&
br_should_learn(p, skb, &vid))
br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, 0);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 08742bff9bf0..36b0367ca1e0 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -500,6 +500,11 @@ static inline bool br_vlan_should_use(const struct net_bridge_vlan *v)
return true;
}
+static inline bool nbp_state_should_learn(const struct net_bridge_port *p)
+{
+ return p->state == BR_STATE_LEARNING || p->state == BR_STATE_FORWARDING;
+}
+
static inline int br_opt_get(const struct net_bridge *br,
enum net_bridge_opts opt)
{
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index ed91ea31978a..12a4f4d93681 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -20,7 +20,6 @@ static unsigned int
ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
- struct net_device *dev;
if (skb_ensure_writable(skb, ETH_ALEN))
return EBT_DROP;
@@ -33,10 +32,22 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
else
skb->pkt_type = PACKET_MULTICAST;
} else {
- if (xt_hooknum(par) != NF_BR_BROUTING)
- dev = br_port_get_rcu(xt_in(par))->br->dev;
- else
+ const struct net_device *dev;
+
+ switch (xt_hooknum(par)) {
+ case NF_BR_BROUTING:
dev = xt_in(par);
+ break;
+ case NF_BR_PRE_ROUTING:
+ dev = br_port_get_rcu(xt_in(par))->br->dev;
+ break;
+ default:
+ dev = NULL;
+ break;
+ }
+
+ if (!dev) /* NF_BR_LOCAL_OUT */
+ return info->target;
if (ether_addr_equal(info->mac, dev->dev_addr))
skb->pkt_type = PACKET_HOST;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 5518a7d9eed9..128d37a4c2e0 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -86,11 +86,12 @@ static atomic_t skbcounter = ATOMIC_INIT(0);
/* af_can socket functions */
-static void can_sock_destruct(struct sock *sk)
+void can_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_error_queue);
}
+EXPORT_SYMBOL(can_sock_destruct);
static const struct can_proto *can_get_proto(int protocol)
{
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index def2f813ffce..137054bff9ec 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -51,6 +51,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
if (!skb)
return;
+ j1939_priv_get(priv);
can_skb_set_owner(skb, iskb->sk);
/* get a pointer to the header of the skb
@@ -104,6 +105,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
j1939_simple_recv(priv, skb);
j1939_sk_recv(priv, skb);
done:
+ j1939_priv_put(priv);
kfree_skb(skb);
}
@@ -150,6 +152,10 @@ static void __j1939_priv_release(struct kref *kref)
netdev_dbg(priv->ndev, "%s: 0x%p\n", __func__, priv);
+ WARN_ON_ONCE(!list_empty(&priv->active_session_list));
+ WARN_ON_ONCE(!list_empty(&priv->ecus));
+ WARN_ON_ONCE(!list_empty(&priv->j1939_socks));
+
dev_put(ndev);
kfree(priv);
}
@@ -207,6 +213,9 @@ static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev)
{
struct can_ml_priv *can_ml_priv = ndev->ml_priv;
+ if (!can_ml_priv)
+ return NULL;
+
return can_ml_priv->j1939_priv;
}
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 37c1040bcb9c..de09b0a65791 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -78,7 +78,6 @@ static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
{
jsk->state |= J1939_SOCK_BOUND;
j1939_priv_get(priv);
- jsk->priv = priv;
spin_lock_bh(&priv->j1939_socks_lock);
list_add_tail(&jsk->list, &priv->j1939_socks);
@@ -91,7 +90,6 @@ static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
list_del_init(&jsk->list);
spin_unlock_bh(&priv->j1939_socks_lock);
- jsk->priv = NULL;
j1939_priv_put(priv);
jsk->state &= ~J1939_SOCK_BOUND;
}
@@ -349,6 +347,34 @@ void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
spin_unlock_bh(&priv->j1939_socks_lock);
}
+static void j1939_sk_sock_destruct(struct sock *sk)
+{
+ struct j1939_sock *jsk = j1939_sk(sk);
+
+ /* This function will be called by the generic networking code when
+ * the socket is ultimately closed (sk->sk_destruct).
+ *
+ * The race between
+ * - processing a received CAN frame
+ * (can_receive -> j1939_can_recv)
+ * and accessing j1939_priv
+ * ... and ...
+ * - closing a socket
+ * (j1939_can_rx_unregister -> can_rx_unregister)
+ * and calling the final j1939_priv_put()
+ *
+ * is avoided by calling the final j1939_priv_put() from this
+ * RCU deferred cleanup call.
+ */
+ if (jsk->priv) {
+ j1939_priv_put(jsk->priv);
+ jsk->priv = NULL;
+ }
+
+ /* call generic CAN sock destruct */
+ can_sock_destruct(sk);
+}
+
static int j1939_sk_init(struct sock *sk)
{
struct j1939_sock *jsk = j1939_sk(sk);
@@ -371,6 +397,7 @@ static int j1939_sk_init(struct sock *sk)
atomic_set(&jsk->skb_pending, 0);
spin_lock_init(&jsk->sk_session_queue_lock);
INIT_LIST_HEAD(&jsk->sk_session_queue);
+ sk->sk_destruct = j1939_sk_sock_destruct;
return 0;
}
@@ -443,6 +470,12 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
}
jsk->ifindex = addr->can_ifindex;
+
+ /* the corresponding j1939_priv_put() is called via
+ * sk->sk_destruct, which points to j1939_sk_sock_destruct()
+ */
+ j1939_priv_get(priv);
+ jsk->priv = priv;
}
/* set default transmit pgn */
@@ -560,8 +593,8 @@ static int j1939_sk_release(struct socket *sock)
if (!sk)
return 0;
- jsk = j1939_sk(sk);
lock_sock(sk);
+ jsk = j1939_sk(sk);
if (jsk->state & J1939_SOCK_BOUND) {
struct j1939_priv *priv = jsk->priv;
@@ -580,6 +613,7 @@ static int j1939_sk_release(struct socket *sock)
j1939_netdev_stop(priv);
}
+ kfree(jsk->filters);
sock_orphan(sk);
sock->sk = NULL;
@@ -909,8 +943,10 @@ void j1939_sk_errqueue(struct j1939_session *session,
memset(serr, 0, sizeof(*serr));
switch (type) {
case J1939_ERRQUEUE_ACK:
- if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))
+ if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)) {
+ kfree_skb(skb);
return;
+ }
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
@@ -918,8 +954,10 @@ void j1939_sk_errqueue(struct j1939_session *session,
state = "ACK";
break;
case J1939_ERRQUEUE_SCHED:
- if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED))
+ if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)) {
+ kfree_skb(skb);
return;
+ }
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
@@ -1054,51 +1092,72 @@ static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct j1939_sock *jsk = j1939_sk(sk);
- struct j1939_priv *priv = jsk->priv;
+ struct j1939_priv *priv;
int ifindex;
int ret;
+ lock_sock(sock->sk);
/* various socket state tests */
- if (!(jsk->state & J1939_SOCK_BOUND))
- return -EBADFD;
+ if (!(jsk->state & J1939_SOCK_BOUND)) {
+ ret = -EBADFD;
+ goto sendmsg_done;
+ }
+ priv = jsk->priv;
ifindex = jsk->ifindex;
- if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR)
+ if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR) {
/* no source address assigned yet */
- return -EBADFD;
+ ret = -EBADFD;
+ goto sendmsg_done;
+ }
/* deal with provided destination address info */
if (msg->msg_name) {
struct sockaddr_can *addr = msg->msg_name;
- if (msg->msg_namelen < J1939_MIN_NAMELEN)
- return -EINVAL;
+ if (msg->msg_namelen < J1939_MIN_NAMELEN) {
+ ret = -EINVAL;
+ goto sendmsg_done;
+ }
- if (addr->can_family != AF_CAN)
- return -EINVAL;
+ if (addr->can_family != AF_CAN) {
+ ret = -EINVAL;
+ goto sendmsg_done;
+ }
- if (addr->can_ifindex && addr->can_ifindex != ifindex)
- return -EBADFD;
+ if (addr->can_ifindex && addr->can_ifindex != ifindex) {
+ ret = -EBADFD;
+ goto sendmsg_done;
+ }
if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
- !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn))
- return -EINVAL;
+ !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) {
+ ret = -EINVAL;
+ goto sendmsg_done;
+ }
if (!addr->can_addr.j1939.name &&
addr->can_addr.j1939.addr == J1939_NO_ADDR &&
- !sock_flag(sk, SOCK_BROADCAST))
+ !sock_flag(sk, SOCK_BROADCAST)) {
/* broadcast, but SO_BROADCAST not set */
- return -EACCES;
+ ret = -EACCES;
+ goto sendmsg_done;
+ }
} else {
if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR &&
- !sock_flag(sk, SOCK_BROADCAST))
+ !sock_flag(sk, SOCK_BROADCAST)) {
/* broadcast, but SO_BROADCAST not set */
- return -EACCES;
+ ret = -EACCES;
+ goto sendmsg_done;
+ }
}
ret = j1939_sk_send_loop(priv, sk, msg, size);
+sendmsg_done:
+ release_sock(sock->sk);
+
return ret;
}
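
The priv reference counting above is spread across four hunks (j1939_jsk_add/del, bind, and the new destructor), so a condensed, hedged sketch of the resulting lifetime rule may help; the names are the patch's own, the bodies are simplified:

/* Condensed lifetime sketch (simplified, error paths elided): the
 * socket takes its own priv reference at bind time and drops it only
 * in sk_destruct, so j1939_can_recv() can never race the final put. */
static int bind_sketch(struct j1939_sock *jsk, struct j1939_priv *priv)
{
	j1939_priv_get(priv);		/* reference owned by the socket */
	jsk->priv = priv;
	return 0;
}

static void destruct_sketch(struct sock *sk)	/* == sk->sk_destruct */
{
	struct j1939_sock *jsk = j1939_sk(sk);

	if (jsk->priv) {
		j1939_priv_put(jsk->priv);	/* the deferred final put */
		jsk->priv = NULL;
	}
	can_sock_destruct(sk);			/* generic CAN cleanup */
}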
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index fe000ea757ea..9f99af5b0b11 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -255,6 +255,7 @@ static void __j1939_session_drop(struct j1939_session *session)
return;
j1939_sock_pending_del(session->sk);
+ sock_put(session->sk);
}
static void j1939_session_destroy(struct j1939_session *session)
@@ -266,6 +267,9 @@ static void j1939_session_destroy(struct j1939_session *session)
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
+ WARN_ON_ONCE(!list_empty(&session->sk_session_queue_entry));
+ WARN_ON_ONCE(!list_empty(&session->active_session_list_entry));
+
skb_queue_purge(&session->skb_queue);
__j1939_session_drop(session);
j1939_priv_put(session->priv);
@@ -1042,12 +1046,13 @@ j1939_session_deactivate_activate_next(struct j1939_session *session)
j1939_sk_queue_activate_next(session);
}
-static void j1939_session_cancel(struct j1939_session *session,
+static void __j1939_session_cancel(struct j1939_session *session,
enum j1939_xtp_abort err)
{
struct j1939_priv *priv = session->priv;
WARN_ON_ONCE(!err);
+ lockdep_assert_held(&session->priv->active_session_list_lock);
session->err = j1939_xtp_abort_to_errno(priv, err);
/* do not send aborts on incoming broadcasts */
@@ -1062,6 +1067,20 @@ static void j1939_session_cancel(struct j1939_session *session,
j1939_sk_send_loop_abort(session->sk, session->err);
}
+static void j1939_session_cancel(struct j1939_session *session,
+ enum j1939_xtp_abort err)
+{
+ j1939_session_list_lock(session->priv);
+
+ if (session->state >= J1939_SESSION_ACTIVE &&
+ session->state < J1939_SESSION_WAITING_ABORT) {
+ j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
+ __j1939_session_cancel(session, err);
+ }
+
+ j1939_session_list_unlock(session->priv);
+}
+
static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
{
struct j1939_session *session =
@@ -1108,8 +1127,6 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
netdev_alert(priv->ndev, "%s: 0x%p: tx aborted with unknown reason: %i\n",
__func__, session, ret);
if (session->skcb.addr.type != J1939_SIMPLE) {
- j1939_tp_set_rxtimeout(session,
- J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, J1939_XTP_ABORT_OTHER);
} else {
session->err = ret;
@@ -1169,7 +1186,7 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
hrtimer_start(&session->rxtimer,
ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
HRTIMER_MODE_REL_SOFT);
- j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT);
+ __j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT);
}
j1939_session_list_unlock(session->priv);
}
@@ -1273,9 +1290,27 @@ j1939_xtp_rx_abort(struct j1939_priv *priv, struct sk_buff *skb,
static void
j1939_xtp_rx_eoma_one(struct j1939_session *session, struct sk_buff *skb)
{
+ struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
+ const u8 *dat;
+ int len;
+
if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
return;
+ dat = skb->data;
+
+ if (skcb->addr.type == J1939_ETP)
+ len = j1939_etp_ctl_to_size(dat);
+ else
+ len = j1939_tp_ctl_to_size(dat);
+
+ if (session->total_message_size != len) {
+ netdev_warn_once(session->priv->ndev,
+ "%s: 0x%p: Incorrect size. Expected: %i; got: %i.\n",
+ __func__, session, session->total_message_size,
+ len);
+ }
+
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
session->pkt.tx_acked = session->pkt.total;
@@ -1357,7 +1392,6 @@ j1939_xtp_rx_cts_one(struct j1939_session *session, struct sk_buff *skb)
out_session_cancel:
j1939_session_timers_cancel(session);
- j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, err);
}
@@ -1432,7 +1466,7 @@ j1939_session *j1939_session_fresh_new(struct j1939_priv *priv,
skcb = j1939_skb_to_cb(skb);
memcpy(skcb, rel_skcb, sizeof(*skcb));
- session = j1939_session_new(priv, skb, skb->len);
+ session = j1939_session_new(priv, skb, size);
if (!session) {
kfree_skb(skb);
return NULL;
@@ -1554,7 +1588,6 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
/* RTS on active session */
j1939_session_timers_cancel(session);
- j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
}
@@ -1565,7 +1598,6 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
session->last_cmd);
j1939_session_timers_cancel(session);
- j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
return -EBUSY;
@@ -1767,7 +1799,6 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
out_session_cancel:
j1939_session_timers_cancel(session);
- j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
j1939_session_put(session);
}
@@ -1848,6 +1879,7 @@ struct j1939_session *j1939_tp_send(struct j1939_priv *priv,
return ERR_PTR(-ENOMEM);
/* skb is recounted in j1939_session_new() */
+ sock_hold(skb->sk);
session->sk = skb->sk;
session->transmission = true;
session->pkt.total = (size + 6) / 7;
@@ -2010,7 +2042,11 @@ int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk)
&priv->active_session_list,
active_session_list_entry) {
if (!sk || sk == session->sk) {
- j1939_session_timers_cancel(session);
+ if (hrtimer_try_to_cancel(&session->txtimer) == 1)
+ j1939_session_put(session);
+ if (hrtimer_try_to_cancel(&session->rxtimer) == 1)
+ j1939_session_put(session);
+
session->err = ESHUTDOWN;
j1939_session_deactivate_locked(session);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index bb15800c8cb5..da78a433c10c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3607,7 +3607,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
qdisc_calculate_pkt_len(skb, q);
if (q->flags & TCQ_F_NOLOCK) {
- if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
+ if ((q->flags & TCQ_F_CAN_BYPASS) && READ_ONCE(q->empty) &&
qdisc_run_begin(q)) {
if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
&q->state))) {
@@ -5611,8 +5611,7 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
NAPI_GRO_CB(skb)->frag0 = NULL;
NAPI_GRO_CB(skb)->frag0_len = 0;
- if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
- pinfo->nr_frags &&
+ if (!skb_headlen(skb) && pinfo->nr_frags &&
!PageHighMem(skb_frag_page(frag0))) {
NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 97e9a2246929..4c63c9a4c09e 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -2791,6 +2791,9 @@ static int devlink_reload(struct devlink *devlink, struct net *dest_net,
{
int err;
+ if (!devlink->reload_enabled)
+ return -EOPNOTSUPP;
+
err = devlink->ops->reload_down(devlink, !!dest_net, extack);
if (err)
return err;
@@ -2809,7 +2812,7 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
struct net *dest_net = NULL;
int err;
- if (!devlink_reload_supported(devlink))
+ if (!devlink_reload_supported(devlink) || !devlink->reload_enabled)
return -EOPNOTSUPP;
err = devlink_resources_validate(devlink, NULL, info);
@@ -3003,6 +3006,11 @@ static const struct devlink_param devlink_param_generic[] = {
.name = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME,
.type = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE,
},
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE,
+ },
};
static int devlink_param_generic_verify(const struct devlink_param *param)
@@ -4411,12 +4419,11 @@ int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value)
}
EXPORT_SYMBOL_GPL(devlink_fmsg_string_put);
-int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
- u16 value_len)
+static int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
+ u16 value_len)
{
return devlink_fmsg_put_value(fmsg, value, value_len, NLA_BINARY);
}
-EXPORT_SYMBOL_GPL(devlink_fmsg_binary_put);
int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
bool value)
@@ -4524,19 +4531,26 @@ int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
EXPORT_SYMBOL_GPL(devlink_fmsg_string_pair_put);
int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
- const void *value, u16 value_len)
+ const void *value, u32 value_len)
{
+ u32 data_size;
+ u32 offset;
int err;
- err = devlink_fmsg_pair_nest_start(fmsg, name);
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, name);
if (err)
return err;
- err = devlink_fmsg_binary_put(fmsg, value, value_len);
- if (err)
- return err;
+ for (offset = 0; offset < value_len; offset += data_size) {
+ data_size = value_len - offset;
+ if (data_size > DEVLINK_FMSG_MAX_SIZE)
+ data_size = DEVLINK_FMSG_MAX_SIZE;
+ err = devlink_fmsg_binary_put(fmsg, value + offset, data_size);
+ if (err)
+ return err;
+ }
- err = devlink_fmsg_pair_nest_end(fmsg);
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
if (err)
return err;
@@ -4733,6 +4747,7 @@ struct devlink_health_reporter {
bool auto_recover;
u8 health_state;
u64 dump_ts;
+ u64 dump_real_ts;
u64 error_count;
u64 recovery_count;
u64 last_recovery_ts;
@@ -4909,6 +4924,7 @@ static int devlink_health_do_dump(struct devlink_health_reporter *reporter,
goto dump_err;
reporter->dump_ts = jiffies;
+ reporter->dump_real_ts = ktime_get_real_ns();
return 0;
@@ -5058,6 +5074,10 @@ devlink_nl_health_reporter_fill(struct sk_buff *msg,
jiffies_to_msecs(reporter->dump_ts),
DEVLINK_ATTR_PAD))
goto reporter_nest_cancel;
+ if (reporter->dump_fmsg &&
+ nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,
+ reporter->dump_real_ts, DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
nla_nest_end(msg, reporter_attr);
genlmsg_end(msg, hdr);
@@ -6308,6 +6328,8 @@ EXPORT_SYMBOL_GPL(devlink_register);
void devlink_unregister(struct devlink *devlink)
{
mutex_lock(&devlink_mutex);
+ WARN_ON(devlink_reload_supported(devlink) &&
+ devlink->reload_enabled);
devlink_notify(devlink, DEVLINK_CMD_DEL);
list_del(&devlink->list);
mutex_unlock(&devlink_mutex);
@@ -6315,6 +6337,41 @@ void devlink_unregister(struct devlink *devlink)
EXPORT_SYMBOL_GPL(devlink_unregister);
/**
+ * devlink_reload_enable - Enable reload of devlink instance
+ *
+ * @devlink: devlink
+ *
+ * Should be called at the end of the device initialization
+ * process when the reload operation is supported.
+ */
+void devlink_reload_enable(struct devlink *devlink)
+{
+ mutex_lock(&devlink_mutex);
+ devlink->reload_enabled = true;
+ mutex_unlock(&devlink_mutex);
+}
+EXPORT_SYMBOL_GPL(devlink_reload_enable);
+
+/**
+ * devlink_reload_disable - Disable reload of devlink instance
+ *
+ * @devlink: devlink
+ *
+ * Should be called at the beginning of the device cleanup
+ * process when the reload operation is supported.
+ */
+void devlink_reload_disable(struct devlink *devlink)
+{
+ mutex_lock(&devlink_mutex);
+ /* Mutex is taken which ensures that no reload operation is in
+ * progress while the forbidden flag is being set.
+ */
+ devlink->reload_enabled = false;
+ mutex_unlock(&devlink_mutex);
+}
+EXPORT_SYMBOL_GPL(devlink_reload_disable);
+
+/**
* devlink_free - Free devlink instance resources
*
* @devlink: devlink
@@ -7602,6 +7659,21 @@ static const struct devlink_trap devlink_trap_generic[] = {
DEVLINK_TRAP(BLACKHOLE_ROUTE, DROP),
DEVLINK_TRAP(TTL_ERROR, EXCEPTION),
DEVLINK_TRAP(TAIL_DROP, DROP),
+ DEVLINK_TRAP(NON_IP_PACKET, DROP),
+ DEVLINK_TRAP(UC_DIP_MC_DMAC, DROP),
+ DEVLINK_TRAP(DIP_LB, DROP),
+ DEVLINK_TRAP(SIP_MC, DROP),
+ DEVLINK_TRAP(SIP_LB, DROP),
+ DEVLINK_TRAP(CORRUPTED_IP_HDR, DROP),
+ DEVLINK_TRAP(IPV4_SIP_BC, DROP),
+ DEVLINK_TRAP(IPV6_MC_DIP_RESERVED_SCOPE, DROP),
+ DEVLINK_TRAP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, DROP),
+ DEVLINK_TRAP(MTU_ERROR, EXCEPTION),
+ DEVLINK_TRAP(UNRESOLVED_NEIGH, EXCEPTION),
+ DEVLINK_TRAP(RPF, EXCEPTION),
+ DEVLINK_TRAP(REJECT_ROUTE, EXCEPTION),
+ DEVLINK_TRAP(IPV4_LPM_UNICAST_MISS, EXCEPTION),
+ DEVLINK_TRAP(IPV6_LPM_UNICAST_MISS, EXCEPTION),
};
#define DEVLINK_TRAP_GROUP(_id) \
@@ -8186,7 +8258,7 @@ static void __net_exit devlink_pernet_pre_exit(struct net *net)
if (WARN_ON(!devlink_reload_supported(devlink)))
continue;
err = devlink_reload(devlink, &init_net, NULL);
- if (err)
+ if (err && err != -EOPNOTSUPP)
pr_warn("Failed to reload devlink instance into init_net\n");
}
}
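
The devlink_fmsg_binary_pair_put() rework above wraps the blob in an array pair and streams it in chunks of at most DEVLINK_FMSG_MAX_SIZE, which is what lets value_len grow to u32. A userspace sketch of the same chunking loop; FMSG_MAX_CHUNK and put_chunk() are stand-ins for DEVLINK_FMSG_MAX_SIZE and devlink_fmsg_binary_put():

#include <stdio.h>

#define FMSG_MAX_CHUNK 4096	/* stand-in for DEVLINK_FMSG_MAX_SIZE */

/* stand-in for devlink_fmsg_binary_put(): emit one bounded chunk */
static int put_chunk(const unsigned char *data, unsigned int len)
{
	printf("chunk at %p, %u bytes\n", (const void *)data, len);
	return 0;
}

/* mirrors the loop in devlink_fmsg_binary_pair_put() above */
static int put_binary_array(const unsigned char *value,
			    unsigned int value_len)
{
	unsigned int offset, data_size;
	int err;

	for (offset = 0; offset < value_len; offset += data_size) {
		data_size = value_len - offset;
		if (data_size > FMSG_MAX_CHUNK)
			data_size = FMSG_MAX_CHUNK;
		err = put_chunk(value + offset, data_size);
		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	static unsigned char blob[10000];

	return put_binary_array(blob, sizeof(blob));	/* 3 chunks */
}
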
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index bfe7bdd4c340..80dbf2f4016e 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -48,7 +48,7 @@ struct net_rate_estimator {
u8 intvl_log; /* period : (250ms << intvl_log) */
seqcount_t seq;
- u32 last_packets;
+ u64 last_packets;
u64 last_bytes;
u64 avpps;
@@ -83,7 +83,7 @@ static void est_timer(struct timer_list *t)
brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
brate -= (est->avbps >> est->ewma_log);
- rate = (u64)(b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
+ rate = (b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
rate -= (est->avpps >> est->ewma_log);
write_seqcount_begin(&est->seq);
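
Widening last_packets to u64 keeps the per-interval delta exact once a counter passes 2^32; with the old u32 field the stored snapshot was truncated and the next delta came out wildly wrong. A small demonstration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t prev = 0x1FFFFFFF0ULL;	/* true count at last interval */
	uint64_t now = 0x1FFFFFFFAULL;	/* 10 packets later */
	uint32_t prev32 = (uint32_t)prev;	/* the old u32 field */

	printf("u64 delta: %" PRIu64 "\n", now - prev);	/* 10 */
	printf("u32 delta: %" PRIu64 "\n", now - prev32);	/* 4294967306 */
	return 0;
}
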
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 36888f5e09eb..1d653fbfcf52 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -123,8 +123,7 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
for_each_possible_cpu(i) {
struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
unsigned int start;
- u64 bytes;
- u32 packets;
+ u64 bytes, packets;
do {
start = u64_stats_fetch_begin_irq(&bcpu->syncp);
@@ -176,12 +175,17 @@ ___gnet_stats_copy_basic(const seqcount_t *running,
if (d->tail) {
struct gnet_stats_basic sb;
+ int res;
memset(&sb, 0, sizeof(sb));
sb.bytes = bstats.bytes;
sb.packets = bstats.packets;
- return gnet_stats_copy(d, type, &sb, sizeof(sb),
- TCA_STATS_PAD);
+ res = gnet_stats_copy(d, type, &sb, sizeof(sb), TCA_STATS_PAD);
+ if (res < 0 || sb.packets == bstats.packets)
+ return res;
+ /* emit 64bit stats only if needed */
+ return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats.packets,
+ sizeof(bstats.packets), TCA_STATS_PAD);
}
return 0;
}
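
The new TCA_STATS_PKT64 attribute is emitted only when the legacy 32-bit field actually truncated the counter; copying into the narrow type and comparing back is the whole test. A minimal sketch:

#include <stdbool.h>
#include <stdint.h>

/* mirrors the "emit 64bit stats only if needed" test: copy into the
 * legacy 32-bit field and emit TCA_STATS_PKT64 only when the
 * round-trip loses information */
static bool needs_pkt64(uint64_t packets)
{
	uint32_t legacy = (uint32_t)packets;

	return (uint64_t)legacy != packets;
}

int main(void)
{
	return needs_pkt64(5000000000ULL) ? 0 : 1;	/* > 2^32: needed */
}
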
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 5480edff0c86..652da6369037 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1197,7 +1197,7 @@ static void neigh_update_hhs(struct neighbour *neigh)
if (update) {
hh = &neigh->hh;
- if (hh->hh_len) {
+ if (READ_ONCE(hh->hh_len)) {
write_seqlock_bh(&hh->hh_lock);
update(hh, neigh->dev, neigh->ha);
write_sequnlock_bh(&hh->hh_lock);
@@ -1476,7 +1476,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
struct net_device *dev = neigh->dev;
unsigned int seq;
- if (dev->header_ops->cache && !neigh->hh.hh_len)
+ if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
neigh_hh_init(neigh);
do {
@@ -2052,8 +2052,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
goto nla_put_failure;
{
unsigned long now = jiffies;
- unsigned int flush_delta = now - tbl->last_flush;
- unsigned int rand_delta = now - tbl->last_rand;
+ long flush_delta = now - tbl->last_flush;
+ long rand_delta = now - tbl->last_rand;
struct neigh_hash_table *nht;
struct ndt_config ndc = {
.ndtc_key_len = tbl->key_len,
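
flush_delta and rand_delta become long because a jiffies snapshot can race with an update from another CPU: a slightly negative difference stored in an unsigned type turns into a huge positive one. Demonstration (the unsigned-to-long conversion assumes the usual two's-complement behavior):

#include <stdio.h>

int main(void)
{
	unsigned long now = 1000;
	unsigned long last = 1003;	/* updated after 'now' was sampled */

	unsigned int udelta = now - last;	/* wraps: ~4.29e9 */
	long sdelta = (long)(now - last);	/* -3: small and sane */

	printf("unsigned: %u  signed: %ld\n", udelta, sdelta);
	return 0;
}
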
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 5bc65587f1c4..a6aefe989043 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -18,6 +18,9 @@
#include <trace/events/page_pool.h>
+#define DEFER_TIME (msecs_to_jiffies(1000))
+#define DEFER_WARN_INTERVAL (60 * HZ)
+
static int page_pool_init(struct page_pool *pool,
const struct page_pool_params *params)
{
@@ -44,6 +47,21 @@ static int page_pool_init(struct page_pool *pool,
(pool->p.dma_dir != DMA_BIDIRECTIONAL))
return -EINVAL;
+ if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
+ /* In order to request DMA-sync-for-device the page
+ * needs to be mapped
+ */
+ if (!(pool->p.flags & PP_FLAG_DMA_MAP))
+ return -EINVAL;
+
+ if (!pool->p.max_len)
+ return -EINVAL;
+
+ /* pool->p.offset has to be set according to the address
+ * offset used by the DMA engine to start copying rx data
+ */
+ }
+
if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
return -ENOMEM;
@@ -112,6 +130,16 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
return page;
}
+static void page_pool_dma_sync_for_device(struct page_pool *pool,
+ struct page *page,
+ unsigned int dma_sync_size)
+{
+ dma_sync_size = min(dma_sync_size, pool->p.max_len);
+ dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
+ pool->p.offset, dma_sync_size,
+ pool->p.dma_dir);
+}
+
/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
@@ -156,6 +184,9 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
}
page->dma_addr = dma;
+ if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+ page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
+
skip_dma_map:
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
@@ -193,22 +224,14 @@ static s32 page_pool_inflight(struct page_pool *pool)
{
u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
- s32 distance;
+ s32 inflight;
- distance = _distance(hold_cnt, release_cnt);
+ inflight = _distance(hold_cnt, release_cnt);
- trace_page_pool_inflight(pool, distance, hold_cnt, release_cnt);
- return distance;
-}
-
-static bool __page_pool_safe_to_destroy(struct page_pool *pool)
-{
- s32 inflight = page_pool_inflight(pool);
-
- /* The distance should not be able to become negative */
+ trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);
- return (inflight == 0);
+ return inflight;
}
/* Cleanup page_pool state from page */
@@ -216,6 +239,7 @@ static void __page_pool_clean_page(struct page_pool *pool,
struct page *page)
{
dma_addr_t dma;
+ int count;
if (!(pool->p.flags & PP_FLAG_DMA_MAP))
goto skip_dma_unmap;
@@ -227,9 +251,11 @@ static void __page_pool_clean_page(struct page_pool *pool,
DMA_ATTR_SKIP_CPU_SYNC);
page->dma_addr = 0;
skip_dma_unmap:
- atomic_inc(&pool->pages_state_release_cnt);
- trace_page_pool_state_release(pool, page,
- atomic_read(&pool->pages_state_release_cnt));
+ /* This may be the last page returned, releasing the pool, so
+ * it is not safe to reference pool afterwards.
+ */
+ count = atomic_inc_return(&pool->pages_state_release_cnt);
+ trace_page_pool_state_release(pool, page, count);
}
/* unmap the page and clean our state */
@@ -283,8 +309,19 @@ static bool __page_pool_recycle_direct(struct page *page,
return true;
}
-void __page_pool_put_page(struct page_pool *pool,
- struct page *page, bool allow_direct)
+/* A page is NOT reusable when:
+ * 1) it was allocated while the system was under memory pressure
+ *    (page_is_pfmemalloc()), or
+ * 2) it belongs to a different NUMA node than pool->p.nid.
+ *
+ * To update pool->p.nid, callers must use page_pool_update_nid().
+ */
+static bool pool_page_reusable(struct page_pool *pool, struct page *page)
+{
+ return !page_is_pfmemalloc(page) && page_to_nid(page) == pool->p.nid;
+}
+
+void __page_pool_put_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size, bool allow_direct)
{
/* This allocator is optimized for the XDP mode that uses
* one-frame-per-page, but have fallbacks that act like the
@@ -292,9 +329,14 @@ void __page_pool_put_page(struct page_pool *pool,
*
* refcnt == 1 means page_pool owns page, and can recycle it.
*/
- if (likely(page_ref_count(page) == 1)) {
+ if (likely(page_ref_count(page) == 1 &&
+ pool_page_reusable(pool, page))) {
/* Read barrier done in page_ref_count / READ_ONCE */
+ if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+ page_pool_dma_sync_for_device(pool, page,
+ dma_sync_size);
+
if (allow_direct && in_serving_softirq())
if (__page_pool_recycle_direct(page, pool))
return;
@@ -338,31 +380,10 @@ static void __page_pool_empty_ring(struct page_pool *pool)
}
}
-static void __warn_in_flight(struct page_pool *pool)
+static void page_pool_free(struct page_pool *pool)
{
- u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
- u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
- s32 distance;
-
- distance = _distance(hold_cnt, release_cnt);
-
- /* Drivers should fix this, but only problematic when DMA is used */
- WARN(1, "Still in-flight pages:%d hold:%u released:%u",
- distance, hold_cnt, release_cnt);
-}
-
-void __page_pool_free(struct page_pool *pool)
-{
- /* Only last user actually free/release resources */
- if (!page_pool_put(pool))
- return;
-
- WARN(pool->alloc.count, "API usage violation");
- WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty");
-
- /* Can happen due to forced shutdown */
- if (!__page_pool_safe_to_destroy(pool))
- __warn_in_flight(pool);
+ if (pool->disconnect)
+ pool->disconnect(pool);
ptr_ring_cleanup(&pool->ring, NULL);
@@ -371,15 +392,14 @@ void __page_pool_free(struct page_pool *pool)
kfree(pool);
}
-EXPORT_SYMBOL(__page_pool_free);
-/* Request to shutdown: release pages cached by page_pool, and check
- * for in-flight pages
- */
-bool __page_pool_request_shutdown(struct page_pool *pool)
+static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
struct page *page;
+ if (pool->destroy_cnt)
+ return;
+
/* Empty alloc cache, assume caller made sure this is
* no-longer in use, and page_pool_alloc_pages() cannot be
* call concurrently.
@@ -388,12 +408,83 @@ bool __page_pool_request_shutdown(struct page_pool *pool)
page = pool->alloc.cache[--pool->alloc.count];
__page_pool_return_page(pool, page);
}
+}
+
+static void page_pool_scrub(struct page_pool *pool)
+{
+ page_pool_empty_alloc_cache_once(pool);
+ pool->destroy_cnt++;
/* No more consumers should exist, but producers could still
* be in-flight.
*/
__page_pool_empty_ring(pool);
+}
+
+static int page_pool_release(struct page_pool *pool)
+{
+ int inflight;
+
+ page_pool_scrub(pool);
+ inflight = page_pool_inflight(pool);
+ if (!inflight)
+ page_pool_free(pool);
+
+ return inflight;
+}
+
+static void page_pool_release_retry(struct work_struct *wq)
+{
+ struct delayed_work *dwq = to_delayed_work(wq);
+ struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
+ int inflight;
+
+ inflight = page_pool_release(pool);
+ if (!inflight)
+ return;
+
+ /* Periodic warning */
+ if (time_after_eq(jiffies, pool->defer_warn)) {
+ int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;
+
+ pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
+ __func__, inflight, sec);
+ pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
+ }
+
+ /* Still not ready to be disconnected, retry later */
+ schedule_delayed_work(&pool->release_dw, DEFER_TIME);
+}
- return __page_pool_safe_to_destroy(pool);
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
+{
+ refcount_inc(&pool->user_cnt);
+ pool->disconnect = disconnect;
+}
+
+void page_pool_destroy(struct page_pool *pool)
+{
+ if (!pool)
+ return;
+
+ if (!page_pool_put(pool))
+ return;
+
+ if (!page_pool_release(pool))
+ return;
+
+ pool->defer_start = jiffies;
+ pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
+
+ INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
+ schedule_delayed_work(&pool->release_dw, DEFER_TIME);
+}
+EXPORT_SYMBOL(page_pool_destroy);
+
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid)
+{
+ trace_page_pool_update_nid(pool, new_nid);
+ pool->p.nid = new_nid;
}
-EXPORT_SYMBOL(__page_pool_request_shutdown);
+EXPORT_SYMBOL(page_pool_update_nid);
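
The page_pool shutdown path above counts inflight pages as the wrap-safe distance between the hold and release counters, retrying destruction from a delayed work item until it reaches zero. A sketch of that distance computation, mirroring _distance()/page_pool_inflight():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* wrap-safe count of pages not yet returned; sketch of _distance() as
 * used by page_pool_inflight() above */
static int32_t distance(uint32_t hold_cnt, uint32_t release_cnt)
{
	return (int32_t)(hold_cnt - release_cnt);
}

int main(void)
{
	/* release counter wrapped past 2^32; 8 pages are in flight */
	printf("%" PRId32 "\n", distance(5, 0xFFFFFFFDu));
	return 0;
}
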
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index cf390e0aa73d..ad31e4e53d0a 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -270,18 +270,28 @@ void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
msg->sg.data[i].length -= trim;
sk_mem_uncharge(sk, trim);
+ /* Adjust copybreak if it falls into the trimmed part of last buf */
+ if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
+ msg->sg.copybreak = msg->sg.data[i].length;
out:
- /* If we trim data before curr pointer update copybreak and current
- * so that any future copy operations start at new copy location.
+ sk_msg_iter_var_next(i);
+ msg->sg.end = i;
+
+ /* If we trim a full sg elem before the curr pointer, update
+ * copybreak and curr so that any future copy operations
+ * start at the new copy location.
 * However, trimmed data that has not yet been used in a copy op
* does not require an update.
*/
- if (msg->sg.curr >= i) {
+ if (!msg->sg.size) {
+ msg->sg.curr = msg->sg.start;
+ msg->sg.copybreak = 0;
+ } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
+ sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
+ sk_msg_iter_var_prev(i);
msg->sg.curr = i;
msg->sg.copybreak = msg->sg.data[i].length;
}
- sk_msg_iter_var_next(i);
- msg->sg.end = i;
}
EXPORT_SYMBOL_GPL(sk_msg_trim);
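
sk_msg scatterlist indices live on a ring, so the new trim logic compares wrap-aware distances from sg.start rather than raw indices. A plausible userspace model of sk_msg_iter_dist(), assuming a ring of MAX_MSG_FRAGS + 1 slots (the exact kernel ring size is not shown in this hunk):

#define MAX_MSG_FRAGS 16	/* assumed ring size, for illustration */

/* wrap-aware distance from 'start' to 'end' on a ring of
 * MAX_MSG_FRAGS + 1 slots; a plausible model of sk_msg_iter_dist() */
static unsigned int iter_dist(unsigned int start, unsigned int end)
{
	return end >= start ? end - start
			    : end + (MAX_MSG_FRAGS + 1) - start;
}

int main(void)
{
	/* end wrapped past the ring boundary: 2 slots from 15 to 0 */
	return iter_dist(15, 0) == 2 ? 0 : 1;
}
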
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 20781ad5f9c3..e334fad0a6b8 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -70,77 +70,63 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
xa = container_of(rcu, struct xdp_mem_allocator, rcu);
- /* Allocator have indicated safe to remove before this is called */
- if (xa->mem.type == MEM_TYPE_PAGE_POOL)
- page_pool_free(xa->page_pool);
-
/* Allow this ID to be reused */
ida_simple_remove(&mem_id_pool, xa->mem.id);
- /* Poison memory */
- xa->mem.id = 0xFFFF;
- xa->mem.type = 0xF0F0;
- xa->allocator = (void *)0xDEAD9001;
-
kfree(xa);
}
-static bool __mem_id_disconnect(int id, bool force)
+static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
- struct xdp_mem_allocator *xa;
- bool safe_to_remove = true;
+ trace_mem_disconnect(xa);
mutex_lock(&mem_id_lock);
- xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
- if (!xa) {
- mutex_unlock(&mem_id_lock);
- WARN(1, "Request remove non-existing id(%d), driver bug?", id);
- return true;
- }
- xa->disconnect_cnt++;
-
- /* Detects in-flight packet-pages for page_pool */
- if (xa->mem.type == MEM_TYPE_PAGE_POOL)
- safe_to_remove = page_pool_request_shutdown(xa->page_pool);
-
- trace_mem_disconnect(xa, safe_to_remove, force);
-
- if ((safe_to_remove || force) &&
- !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
+ if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
mutex_unlock(&mem_id_lock);
- return (safe_to_remove|force);
}
-#define DEFER_TIME (msecs_to_jiffies(1000))
-#define DEFER_WARN_INTERVAL (30 * HZ)
-#define DEFER_MAX_RETRIES 120
+static void mem_allocator_disconnect(void *allocator)
+{
+ struct xdp_mem_allocator *xa;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(mem_id_ht, &iter);
+ do {
+ rhashtable_walk_start(&iter);
+
+ while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
+ if (xa->allocator == allocator)
+ mem_xa_remove(xa);
+ }
+
+ rhashtable_walk_stop(&iter);
-static void mem_id_disconnect_defer_retry(struct work_struct *wq)
+ } while (xa == ERR_PTR(-EAGAIN));
+ rhashtable_walk_exit(&iter);
+}
+
+static void mem_id_disconnect(int id)
{
- struct delayed_work *dwq = to_delayed_work(wq);
- struct xdp_mem_allocator *xa = container_of(dwq, typeof(*xa), defer_wq);
- bool force = false;
+ struct xdp_mem_allocator *xa;
- if (xa->disconnect_cnt > DEFER_MAX_RETRIES)
- force = true;
+ mutex_lock(&mem_id_lock);
- if (__mem_id_disconnect(xa->mem.id, force))
+ xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
+ if (!xa) {
+ mutex_unlock(&mem_id_lock);
+ WARN(1, "Request remove non-existing id(%d), driver bug?", id);
return;
+ }
- /* Periodic warning */
- if (time_after_eq(jiffies, xa->defer_warn)) {
- int sec = (s32)((u32)jiffies - (u32)xa->defer_start) / HZ;
+ trace_mem_disconnect(xa);
- pr_warn("%s() stalled mem.id=%u shutdown %d attempts %d sec\n",
- __func__, xa->mem.id, xa->disconnect_cnt, sec);
- xa->defer_warn = jiffies + DEFER_WARN_INTERVAL;
- }
+ if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
+ call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
- /* Still not ready to be disconnected, retry later */
- schedule_delayed_work(&xa->defer_wq, DEFER_TIME);
+ mutex_unlock(&mem_id_lock);
}
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
@@ -153,38 +139,21 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
return;
}
- if (xdp_rxq->mem.type != MEM_TYPE_PAGE_POOL &&
- xdp_rxq->mem.type != MEM_TYPE_ZERO_COPY) {
- return;
- }
-
if (id == 0)
return;
- if (__mem_id_disconnect(id, false))
- return;
-
- /* Could not disconnect, defer new disconnect attempt to later */
- mutex_lock(&mem_id_lock);
+ if (xdp_rxq->mem.type == MEM_TYPE_ZERO_COPY)
+ return mem_id_disconnect(id);
- xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
- if (!xa) {
- mutex_unlock(&mem_id_lock);
- return;
+ if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) {
+ rcu_read_lock();
+ xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
+ page_pool_destroy(xa->page_pool);
+ rcu_read_unlock();
}
- xa->defer_start = jiffies;
- xa->defer_warn = jiffies + DEFER_WARN_INTERVAL;
-
- INIT_DELAYED_WORK(&xa->defer_wq, mem_id_disconnect_defer_retry);
- mutex_unlock(&mem_id_lock);
- schedule_delayed_work(&xa->defer_wq, DEFER_TIME);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);
-/* This unregister operation will also cleanup and destroy the
- * allocator. The page_pool_free() operation is first called when it's
- * safe to remove, possibly deferred to a workqueue.
- */
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
/* Simplify driver cleanup code paths, allow unreg "unused" */
@@ -371,7 +340,7 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
}
if (type == MEM_TYPE_PAGE_POOL)
- page_pool_get(xdp_alloc->page_pool);
+ page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);
mutex_unlock(&mem_id_lock);
@@ -402,15 +371,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
page = virt_to_head_page(data);
- if (likely(xa)) {
- napi_direct &= !xdp_return_frame_no_direct();
- page_pool_put_page(xa->page_pool, page, napi_direct);
- } else {
- /* Hopefully stack show who to blame for late return */
- WARN_ONCE(1, "page_pool gone mem.id=%d", mem->id);
- trace_mem_return_failed(mem, page);
- put_page(page);
- }
+ napi_direct &= !xdp_return_frame_no_direct();
+ page_pool_put_page(xa->page_pool, page, napi_direct);
rcu_read_unlock();
break;
case MEM_TYPE_PAGE_SHARED:
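
With the deferred page_pool destruction, the mem-model teardown inverts: page_pool_use_xdp_mem() leaves a disconnect callback on the pool, and the pool invokes it from its final release to purge the rhashtable entries. A stand-in sketch of that callback hand-off:

#include <stdio.h>

struct pool {
	void (*disconnect)(void *allocator);
	void *allocator;
	int users;
};

/* sketch of page_pool_use_xdp_mem(): xdp takes a reference on the
 * pool and leaves a callback to run at final release */
static void pool_use(struct pool *p, void *alloc, void (*cb)(void *))
{
	p->users++;
	p->allocator = alloc;
	p->disconnect = cb;
}

static void pool_free(struct pool *p)
{
	if (p->disconnect)
		p->disconnect(p->allocator);	/* unhook xdp state first */
	printf("pool freed\n");
}

/* sketch of mem_allocator_disconnect(): drop every mem-model entry
 * that points at this allocator */
static void xdp_disconnect(void *alloc)
{
	printf("removing rhashtable entries for allocator %p\n", alloc);
}

int main(void)
{
	struct pool p = { 0 };
	int dummy_allocator;

	pool_use(&p, &dummy_allocator, xdp_disconnect);
	if (--p.users == 0)
		pool_free(&p);
	return 0;
}
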
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 0d8f782c25cc..d19557c6d04b 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -416,7 +416,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = ip_hdr(skb)->ttl;
- newinet->inet_id = jiffies;
+ newinet->inet_id = prandom_u32();
if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
goto put_and_exit;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 5bad08dc4316..a52e8ba1ced0 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -944,7 +944,7 @@ int inet_dccp_listen(struct socket *sock, int backlog)
if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
goto out;
- sk->sk_max_ack_backlog = backlog;
+ WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
/* Really, if the socket is already in listen state
* we can only allow the backlog to be adjusted.
*/
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 3349ea81f901..e19a92a62e14 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1091,7 +1091,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
}
cb = DN_SKB_CB(skb);
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
if (newsk == NULL) {
release_sock(sk);
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index e4161e0c86aa..c68503a18025 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -328,7 +328,7 @@ static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb)
return;
}
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_state_change(sk);
}
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 29e2bd5cc5af..1e6c3cac11e6 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -20,7 +20,7 @@ if NET_DSA
# tagging formats
config NET_DSA_TAG_8021Q
- tristate "Tag driver for switches using custom 802.1Q VLAN headers"
+ tristate
select VLAN_8021Q
help
Unlike the other tagging protocols, the 802.1Q config option simply
@@ -79,6 +79,13 @@ config NET_DSA_TAG_KSZ
Say Y if you want to enable support for tagging frames for the
Microchip 8795/9477/9893 families of switches.
+config NET_DSA_TAG_OCELOT
+ tristate "Tag driver for Ocelot family of switches"
+ select PACKING
+ help
+ Say Y or M if you want to enable support for tagging frames for the
+ Ocelot switches (VSC7511, VSC7512, VSC7513, VSC7514, VSC9959).
+
config NET_DSA_TAG_QCA
tristate "Tag driver for Qualcomm Atheros QCA8K switches"
help
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 2c6d286f0511..9a482c38bdb1 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o
obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
+obj-$(CONFIG_NET_DSA_TAG_OCELOT) += tag_ocelot.o
obj-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
obj-$(CONFIG_NET_DSA_TAG_SJA1105) += tag_sja1105.o
obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index db1c1c7e40e9..17281fec710c 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -379,6 +379,43 @@ void dsa_devlink_params_unregister(struct dsa_switch *ds,
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_unregister);
+int dsa_devlink_resource_register(struct dsa_switch *ds,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params)
+{
+ return devlink_resource_register(ds->devlink, resource_name,
+ resource_size, resource_id,
+ parent_resource_id,
+ size_params);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);
+
+void dsa_devlink_resources_unregister(struct dsa_switch *ds)
+{
+ devlink_resources_unregister(ds->devlink, NULL);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister);
+
+void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv)
+{
+ return devlink_resource_occ_get_register(ds->devlink, resource_id,
+ occ_get, occ_get_priv);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);
+
+void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
+ u64 resource_id)
+{
+ devlink_resource_occ_get_unregister(ds->devlink, resource_id);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);
+
static int __init dsa_init_module(void)
{
int rc;
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index ff2fa3950c62..9ef2caa13f27 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -874,12 +874,13 @@ static void dsa_switch_remove(struct dsa_switch *ds)
struct dsa_switch_tree *dst = ds->dst;
struct dsa_port *dp, *next;
+ dsa_tree_teardown(dst);
+
list_for_each_entry_safe(dp, next, &dst->ports, list) {
list_del(&dp->list);
kfree(dp);
}
- dsa_tree_teardown(dst);
dsa_tree_put(dst);
}
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 9b54e5a76297..6e93c36bf0c0 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -561,7 +561,7 @@ static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
struct dsa_switch *ds = dp->ds;
struct phy_device *phydev;
int port = dp->index;
- int mode;
+ phy_interface_t mode;
int err;
err = of_phy_register_fixed_link(dn);
@@ -574,8 +574,8 @@ static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
phydev = of_phy_find_device(dn);
- mode = of_get_phy_mode(dn);
- if (mode < 0)
+ err = of_get_phy_mode(dn, &mode);
+ if (err)
mode = PHY_INTERFACE_MODE_NA;
phydev->interface = mode;
@@ -593,10 +593,11 @@ static int dsa_port_phylink_register(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
struct device_node *port_dn = dp->dn;
- int mode, err;
+ phy_interface_t mode;
+ int err;
- mode = of_get_phy_mode(port_dn);
- if (mode < 0)
+ err = of_get_phy_mode(port_dn, &mode);
+ if (err)
mode = PHY_INTERFACE_MODE_NA;
dp->pl_config.dev = ds->dev;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index d18761649754..78ffc87dc25e 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1313,11 +1313,12 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
struct dsa_port *dp = dsa_slave_to_port(slave_dev);
struct device_node *port_dn = dp->dn;
struct dsa_switch *ds = dp->ds;
+ phy_interface_t mode;
u32 phy_flags = 0;
- int mode, ret;
+ int ret;
- mode = of_get_phy_mode(port_dn);
- if (mode < 0)
+ ret = of_get_phy_mode(port_dn, &mode);
+ if (ret)
mode = PHY_INTERFACE_MODE_NA;
dp->pl_config.dev = &slave_dev->dev;
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index bc5cb91bf052..2fb6c26294b5 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -105,7 +105,7 @@ static int dsa_8021q_restore_pvid(struct dsa_switch *ds, int port)
slave = dsa_to_port(ds, port)->slave;
err = br_vlan_get_pvid(slave, &pvid);
- if (err < 0)
+ if (!pvid || err < 0)
/* There is no pvid on the bridge for this port, which is
* perfectly valid. Nothing to restore, bye-bye!
*/
@@ -341,13 +341,4 @@ struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(dsa_8021q_remove_header);
-static const struct dsa_device_ops dsa_8021q_netdev_ops = {
- .name = "8021q",
- .proto = DSA_TAG_PROTO_8021Q,
- .overhead = VLAN_HLEN,
-};
-
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_8021Q);
-
-module_dsa_tag_driver(dsa_8021q_netdev_ops);
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
new file mode 100644
index 000000000000..078d4790669d
--- /dev/null
+++ b/net/dsa/tag_ocelot.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 NXP Semiconductors
+ */
+#include <soc/mscc/ocelot.h>
+#include <linux/packing.h>
+#include "dsa_priv.h"
+
+/* The CPU injection header and the CPU extraction header can have 3 types of
+ * prefixes: long, short and no prefix. The format of the header itself is the
+ * same in all 3 cases.
+ *
+ * Extraction with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | ff:ff:ff:ff:ff:ff | ff:ff:ff:ff:ff:ff | 8880 | 000a | extraction | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Extraction with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | extraction | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Extraction with no prefix:
+ *
+ * +------------+-------+
+ * | extraction | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ *
+ * Injection with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | any dmac | any smac | 8880 | 000a | injection | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Injection with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | injection | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Injection with no prefix:
+ *
+ * +------------+-------+
+ * | injection | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ * The injection header looks like this (network byte order, bit 127
+ * is part of lowest address byte in memory, bit 0 is part of highest
+ * address byte):
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 |BYPASS| MASQ | MASQ_PORT |REW_OP|REW_OP|
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | RSV | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | RSV |TFRM_TIMER|
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | TFRM_TIMER | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 | RSV | DP | POP_CNT | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ *
+ * And the extraction header looks like this:
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 | RSV | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL | LLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | LLEN | WLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | WLEN | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | ACL_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | ACL_ID | RSV | SFLOW_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 |ACL_HIT| DP | LRN_FLAGS | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ */
+
+static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(netdev);
+ u64 bypass, dest, src, qos_class;
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+ u8 *injection;
+
+ if (unlikely(skb_cow_head(skb, OCELOT_TAG_LEN) < 0)) {
+ netdev_err(netdev, "Cannot make room for tag.\n");
+ return NULL;
+ }
+
+ injection = skb_push(skb, OCELOT_TAG_LEN);
+
+ memset(injection, 0, OCELOT_TAG_LEN);
+
+ src = dsa_upstream_port(ds, port);
+ dest = BIT(port);
+ bypass = true;
+ qos_class = skb->priority;
+
+ packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &dest, 68, 56, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0);
+
+ return skb;
+}
+
+static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct packet_type *pt)
+{
+ u64 src_port, qos_class;
+ u8 *start = skb->data;
+ u8 *extraction;
+
+ /* Revert skb->data by the amount consumed by the DSA master,
+ * so it points to the beginning of the frame.
+ */
+ skb_push(skb, ETH_HLEN);
+ /* We don't care about the long prefix; it is just for easy entrance
+ * into the DSA master's RX filter. Discard it now by moving it into
+ * the headroom.
+ */
+ skb_pull(skb, OCELOT_LONG_PREFIX_LEN);
+ /* And skb->data now points to the extraction frame header.
+ * Keep a pointer to it.
+ */
+ extraction = skb->data;
+ /* Now the EFH is part of the headroom as well */
+ skb_pull(skb, OCELOT_TAG_LEN);
+ /* Reset the pointer to the real MAC header */
+ skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
+ /* And move skb->data to the correct location again */
+ skb_pull(skb, ETH_HLEN);
+
+ /* Remove the extraction header from the inet checksum */
+ skb_postpull_rcsum(skb, start, OCELOT_LONG_PREFIX_LEN + OCELOT_TAG_LEN);
+
+ packing(extraction, &src_port, 46, 43, OCELOT_TAG_LEN, UNPACK, 0);
+ packing(extraction, &qos_class, 19, 17, OCELOT_TAG_LEN, UNPACK, 0);
+
+ skb->dev = dsa_master_find_slave(netdev, 0, src_port);
+ if (!skb->dev)
+ /* The switch will reflect back some frames sent through
+ * sockets opened on the bare DSA master. These will come back
+ * with src_port equal to the index of the CPU port, for which
+ * there is no slave registered. So don't print any error
+ * message here (ignore and drop those frames).
+ */
+ return NULL;
+
+ skb->offload_fwd_mark = 1;
+ skb->priority = qos_class;
+
+ return skb;
+}
+
+static struct dsa_device_ops ocelot_netdev_ops = {
+ .name = "ocelot",
+ .proto = DSA_TAG_PROTO_OCELOT,
+ .xmit = ocelot_xmit,
+ .rcv = ocelot_rcv,
+ .overhead = OCELOT_TAG_LEN + OCELOT_LONG_PREFIX_LEN,
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_OCELOT);
+
+module_dsa_tag_driver(ocelot_netdev_ops);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 17374afee28f..9040fe55e0f5 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -244,7 +244,12 @@ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16
eth->h_proto = type;
memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
- hh->hh_len = ETH_HLEN;
+
+ /* Pairs with READ_ONCE() in neigh_resolve_output(),
+ * neigh_hh_output() and neigh_update_hhs().
+ */
+ smp_store_release(&hh->hh_len, ETH_HLEN);
+
return 0;
}
EXPORT_SYMBOL(eth_header_cache);
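
smp_store_release() on hh_len guarantees that a reader who sees the nonzero length also sees the header bytes written just before it; the READ_ONCE() annotations added in net/core/neighbour.c are the other half of the pairing. A C11 analog using release/acquire atomics (len <= sizeof(hh_data) is assumed):

#include <stdatomic.h>
#include <string.h>

struct hh_cache_like {
	_Atomic unsigned int hh_len;
	unsigned char hh_data[16];
};

/* writer: fill the cached header, then publish its length with
 * release semantics -- the analog of smp_store_release() above */
static void publish(struct hh_cache_like *hh, const void *hdr,
		    unsigned int len)
{
	memcpy(hh->hh_data, hdr, len);
	atomic_store_explicit(&hh->hh_len, len, memory_order_release);
}

/* reader: the acquire load pairs with the release store, so a nonzero
 * length guarantees hh_data is fully visible -- the READ_ONCE() side */
static unsigned int peek_len(struct hh_cache_like *hh)
{
	return atomic_load_explicit(&hh->hh_len, memory_order_acquire);
}

int main(void)
{
	static struct hh_cache_like hh;

	publish(&hh, "\x01\x02", 2);
	return peek_len(&hh) == 2 ? 0 : 1;
}
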
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 70f92aaca411..53de8e00990e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -208,7 +208,7 @@ int inet_listen(struct socket *sock, int backlog)
if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
goto out;
- sk->sk_max_ack_backlog = backlog;
+ WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
/* Really, if the socket is already in listen state
* we can only allow the backlog to be adjusted.
*/
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 0913a090b2bf..f1888c683426 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1814,8 +1814,8 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local)
int ret = 0;
unsigned int hash = fib_laddr_hashfn(local);
struct hlist_head *head = &fib_info_laddrhash[hash];
+ int tb_id = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
struct net *net = dev_net(dev);
- int tb_id = l3mdev_fib_table(dev);
struct fib_info *fi;
if (!fib_info_laddrhash || local == 0)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index a72fbdf1fb85..18068ed42f25 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -249,10 +249,11 @@ bool icmp_global_allow(void)
bool rc = false;
/* Check if token bucket is empty and cannot be refilled
- * without taking the spinlock.
+ * without taking the spinlock. The READ_ONCE() calls are paired
+ * with the WRITE_ONCE() calls later in this same function.
*/
- if (!icmp_global.credit) {
- delta = min_t(u32, now - icmp_global.stamp, HZ);
+ if (!READ_ONCE(icmp_global.credit)) {
+ delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ);
if (delta < HZ / 50)
return false;
}
@@ -262,14 +263,14 @@ bool icmp_global_allow(void)
if (delta >= HZ / 50) {
incr = sysctl_icmp_msgs_per_sec * delta / HZ ;
if (incr)
- icmp_global.stamp = now;
+ WRITE_ONCE(icmp_global.stamp, now);
}
credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
if (credit) {
credit--;
rc = true;
}
- icmp_global.credit = credit;
+ WRITE_ONCE(icmp_global.credit, credit);
spin_unlock(&icmp_global.lock);
return rc;
}
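
icmp_global_allow() is a token bucket: credit refills at sysctl_icmp_msgs_per_sec up to a burst cap and each allowed message consumes one token; the patch only annotates the lockless pre-check with READ_ONCE()/WRITE_ONCE(). A single-threaded sketch of the bucket itself, without the spinlock and without clamping delta to HZ as the kernel does:

#include <stdbool.h>
#include <stdint.h>

#define HZ 100
#define MSGS_PER_SEC 1000	/* sysctl_icmp_msgs_per_sec stand-in */
#define MSGS_BURST 50		/* sysctl_icmp_msgs_burst stand-in */

static struct {
	uint32_t credit;
	uint32_t stamp;		/* last refill time, in ticks */
} bucket;

static bool rate_allow(uint32_t now)
{
	uint32_t delta = now - bucket.stamp;
	uint32_t incr = 0;

	if (delta >= HZ / 50) {	/* refill at most every 20ms */
		incr = MSGS_PER_SEC * delta / HZ;
		if (incr)
			bucket.stamp = now;
	}

	bucket.credit += incr;
	if (bucket.credit > MSGS_BURST)
		bucket.credit = MSGS_BURST;

	if (!bucket.credit)
		return false;
	bucket.credit--;	/* one token per allowed message */
	return true;
}

int main(void)
{
	return rate_allow(HZ) ? 0 : 1;	/* first call after 1s: allowed */
}
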
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index eb30fc1770de..e4c6e8b40490 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -716,7 +716,7 @@ static void reqsk_timer_handler(struct timer_list *t)
* ones are about to clog our table.
*/
qlen = reqsk_queue_len(queue);
- if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
+ if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
int young = reqsk_queue_len_young(queue) << 1;
while (thresh > 2) {
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 7dc79b973e6e..af154977904c 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -226,17 +226,17 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->idiag_timer = 1;
r->idiag_retrans = icsk->icsk_retransmits;
r->idiag_expires =
- jiffies_to_msecs(icsk->icsk_timeout - jiffies);
+ jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
r->idiag_timer = 4;
r->idiag_retrans = icsk->icsk_probes_out;
r->idiag_expires =
- jiffies_to_msecs(icsk->icsk_timeout - jiffies);
+ jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
} else if (timer_pending(&sk->sk_timer)) {
r->idiag_timer = 2;
r->idiag_retrans = icsk->icsk_probes_out;
r->idiag_expires =
- jiffies_to_msecs(sk->sk_timer.expires - jiffies);
+ jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies);
} else {
r->idiag_timer = 0;
r->idiag_expires = 0;
@@ -342,16 +342,13 @@ static int inet_twsk_diag_fill(struct sock *sk,
r = nlmsg_data(nlh);
BUG_ON(tw->tw_state != TCP_TIME_WAIT);
- tmo = tw->tw_timer.expires - jiffies;
- if (tmo < 0)
- tmo = 0;
-
inet_diag_msg_common_fill(r, sk);
r->idiag_retrans = 0;
r->idiag_state = tw->tw_substate;
r->idiag_timer = 3;
- r->idiag_expires = jiffies_to_msecs(tmo);
+ tmo = tw->tw_timer.expires - jiffies;
+ r->idiag_expires = jiffies_delta_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
r->idiag_uid = 0;
@@ -385,7 +382,7 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
offsetof(struct sock, sk_cookie));
tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
- r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
+ r->idiag_expires = jiffies_delta_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
r->idiag_uid = 0;
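
All three timer fields can go negative when the timer has already expired by the time the dump runs; jiffies_delta_to_msecs() clamps such deltas to zero instead of reporting a huge unsigned value. A userspace analog (ignoring overflow of delta * 1000 for very large deltas):

/* analog of the kernel's jiffies_delta_to_msecs(): an already-expired
 * timer yields a negative delta, reported as 0 instead of a huge
 * unsigned number of milliseconds */
static unsigned long delta_to_msecs(long delta, unsigned long hz)
{
	if (delta < 0)
		delta = 0;
	return (unsigned long)delta * 1000 / hz;
}

int main(void)
{
	return delta_to_msecs(-25, 100) == 0 ? 0 : 1;
}
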
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index be778599bfed..ff327a62c9ce 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -160,7 +160,12 @@ static void inet_peer_gc(struct inet_peer_base *base,
base->total / inet_peer_threshold * HZ;
for (i = 0; i < gc_cnt; i++) {
p = gc_stack[i];
- delta = (__u32)jiffies - p->dtime;
+
+ /* The READ_ONCE() pairs with the WRITE_ONCE()
+ * in inet_putpeer()
+ */
+ delta = (__u32)jiffies - READ_ONCE(p->dtime);
+
if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
gc_stack[i] = NULL;
}
@@ -237,7 +242,10 @@ EXPORT_SYMBOL_GPL(inet_getpeer);
void inet_putpeer(struct inet_peer *p)
{
- p->dtime = (__u32)jiffies;
+ /* The WRITE_ONCE() pairs with itself (we run lockless)
+ * and the READ_ONCE() in inet_peer_gc()
+ */
+ WRITE_ONCE(p->dtime, (__u32)jiffies);
if (refcount_dec_and_test(&p->refcnt))
call_rcu(&p->rcu, inetpeer_free_rcu);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3d8baaaf7086..9d83cb320dcb 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -422,7 +422,7 @@ int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct net_device *dev = skb_dst(skb)->dev;
+ struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
@@ -430,7 +430,7 @@ int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb->protocol = htons(ETH_P_IP);
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, dev,
+ net, sk, skb, indev, dev,
ip_finish_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 1452a97914a0..45405d26d370 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -34,6 +34,9 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>
+#include <net/geneve.h>
+#include <net/vxlan.h>
+#include <net/erspan.h>
const struct ip_tunnel_encap_ops __rcu *
iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
@@ -126,15 +129,14 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
if (!md || md->type != METADATA_IP_TUNNEL ||
md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
-
return NULL;
- res = metadata_dst_alloc(0, METADATA_IP_TUNNEL, flags);
+ src = &md->u.tun_info;
+ res = metadata_dst_alloc(src->options_len, METADATA_IP_TUNNEL, flags);
if (!res)
return NULL;
dst = &res->u.tun_info;
- src = &md->u.tun_info;
dst->key.tun_id = src->key.tun_id;
if (src->mode & IP_TUNNEL_INFO_IPV6)
memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src,
@@ -143,6 +145,8 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
dst->key.u.ipv4.dst = src->key.u.ipv4.src;
dst->key.tun_flags = src->key.tun_flags;
dst->mode = src->mode | IP_TUNNEL_INFO_TX;
+ ip_tunnel_info_opts_set(dst, ip_tunnel_info_opts(src),
+ src->options_len, 0);
return res;
}
@@ -217,24 +221,228 @@ static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
[LWTUNNEL_IP_TTL] = { .type = NLA_U8 },
[LWTUNNEL_IP_TOS] = { .type = NLA_U8 },
[LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 },
+ [LWTUNNEL_IP_OPTS] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy ip_opts_policy[LWTUNNEL_IP_OPTS_MAX + 1] = {
+ [LWTUNNEL_IP_OPTS_GENEVE] = { .type = NLA_NESTED },
+ [LWTUNNEL_IP_OPTS_VXLAN] = { .type = NLA_NESTED },
+ [LWTUNNEL_IP_OPTS_ERSPAN] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy
+geneve_opt_policy[LWTUNNEL_IP_OPT_GENEVE_MAX + 1] = {
+ [LWTUNNEL_IP_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
+ [LWTUNNEL_IP_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
+ [LWTUNNEL_IP_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 },
+};
+
+static const struct nla_policy
+vxlan_opt_policy[LWTUNNEL_IP_OPT_VXLAN_MAX + 1] = {
+ [LWTUNNEL_IP_OPT_VXLAN_GBP] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy
+erspan_opt_policy[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1] = {
+ [LWTUNNEL_IP_OPT_ERSPAN_VER] = { .type = NLA_U8 },
+ [LWTUNNEL_IP_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
+ [LWTUNNEL_IP_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
+ [LWTUNNEL_IP_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
};
+static int ip_tun_parse_opts_geneve(struct nlattr *attr,
+ struct ip_tunnel_info *info, int opts_len,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[LWTUNNEL_IP_OPT_GENEVE_MAX + 1];
+ int data_len, err;
+
+ err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_GENEVE_MAX, attr,
+ geneve_opt_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[LWTUNNEL_IP_OPT_GENEVE_CLASS] ||
+ !tb[LWTUNNEL_IP_OPT_GENEVE_TYPE] ||
+ !tb[LWTUNNEL_IP_OPT_GENEVE_DATA])
+ return -EINVAL;
+
+ attr = tb[LWTUNNEL_IP_OPT_GENEVE_DATA];
+ data_len = nla_len(attr);
+ if (data_len % 4)
+ return -EINVAL;
+
+ if (info) {
+ struct geneve_opt *opt = ip_tunnel_info_opts(info) + opts_len;
+
+ memcpy(opt->opt_data, nla_data(attr), data_len);
+ opt->length = data_len / 4;
+ attr = tb[LWTUNNEL_IP_OPT_GENEVE_CLASS];
+ opt->opt_class = nla_get_be16(attr);
+ attr = tb[LWTUNNEL_IP_OPT_GENEVE_TYPE];
+ opt->type = nla_get_u8(attr);
+ info->key.tun_flags |= TUNNEL_GENEVE_OPT;
+ }
+
+ return sizeof(struct geneve_opt) + data_len;
+}
+
+static int ip_tun_parse_opts_vxlan(struct nlattr *attr,
+ struct ip_tunnel_info *info, int opts_len,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[LWTUNNEL_IP_OPT_VXLAN_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_VXLAN_MAX, attr,
+ vxlan_opt_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[LWTUNNEL_IP_OPT_VXLAN_GBP])
+ return -EINVAL;
+
+ if (info) {
+ struct vxlan_metadata *md =
+ ip_tunnel_info_opts(info) + opts_len;
+
+ attr = tb[LWTUNNEL_IP_OPT_VXLAN_GBP];
+ md->gbp = nla_get_u32(attr);
+ info->key.tun_flags |= TUNNEL_VXLAN_OPT;
+ }
+
+ return sizeof(struct vxlan_metadata);
+}
+
+static int ip_tun_parse_opts_erspan(struct nlattr *attr,
+ struct ip_tunnel_info *info, int opts_len,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_ERSPAN_MAX, attr,
+ erspan_opt_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[LWTUNNEL_IP_OPT_ERSPAN_VER])
+ return -EINVAL;
+
+ if (info) {
+ struct erspan_metadata *md =
+ ip_tunnel_info_opts(info) + opts_len;
+
+ attr = tb[LWTUNNEL_IP_OPT_ERSPAN_VER];
+ md->version = nla_get_u8(attr);
+
+ if (md->version == 1 && tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX]) {
+ attr = tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX];
+ md->u.index = nla_get_be32(attr);
+ } else if (md->version == 2 && tb[LWTUNNEL_IP_OPT_ERSPAN_DIR] &&
+ tb[LWTUNNEL_IP_OPT_ERSPAN_HWID]) {
+ attr = tb[LWTUNNEL_IP_OPT_ERSPAN_DIR];
+ md->u.md2.dir = nla_get_u8(attr);
+ attr = tb[LWTUNNEL_IP_OPT_ERSPAN_HWID];
+ set_hwid(&md->u.md2, nla_get_u8(attr));
+ } else {
+ return -EINVAL;
+ }
+
+ info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
+ }
+
+ return sizeof(struct erspan_metadata);
+}
+
+static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
+ struct netlink_ext_ack *extack)
+{
+ int err, rem, opt_len, opts_len = 0, type = 0;
+ struct nlattr *nla;
+
+ if (!attr)
+ return 0;
+
+ err = nla_validate(nla_data(attr), nla_len(attr), LWTUNNEL_IP_OPTS_MAX,
+ ip_opts_policy, extack);
+ if (err)
+ return err;
+
+ nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
+ switch (nla_type(nla)) {
+ case LWTUNNEL_IP_OPTS_GENEVE:
+ if (type && type != TUNNEL_GENEVE_OPT)
+ return -EINVAL;
+ opt_len = ip_tun_parse_opts_geneve(nla, info, opts_len,
+ extack);
+ if (opt_len < 0)
+ return opt_len;
+ opts_len += opt_len;
+ if (opts_len > IP_TUNNEL_OPTS_MAX)
+ return -EINVAL;
+ type = TUNNEL_GENEVE_OPT;
+ break;
+ case LWTUNNEL_IP_OPTS_VXLAN:
+ if (type)
+ return -EINVAL;
+ opt_len = ip_tun_parse_opts_vxlan(nla, info, opts_len,
+ extack);
+ if (opt_len < 0)
+ return opt_len;
+ opts_len += opt_len;
+ type = TUNNEL_VXLAN_OPT;
+ break;
+ case LWTUNNEL_IP_OPTS_ERSPAN:
+ if (type)
+ return -EINVAL;
+ opt_len = ip_tun_parse_opts_erspan(nla, info, opts_len,
+ extack);
+ if (opt_len < 0)
+ return opt_len;
+ opts_len += opt_len;
+ type = TUNNEL_ERSPAN_OPT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return opts_len;
+}
+
+static int ip_tun_get_optlen(struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ return ip_tun_parse_opts(attr, NULL, extack);
+}
+
+static int ip_tun_set_opts(struct nlattr *attr, struct ip_tunnel_info *info,
+ struct netlink_ext_ack *extack)
+{
+ return ip_tun_parse_opts(attr, info, extack);
+}
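
Note the two-pass structure: ip_tun_get_optlen() runs the parser with a NULL info purely to measure, the lwtunnel state is allocated with that many extra bytes, and ip_tun_set_opts() runs the same parser again to fill it. A generic sketch of the measure-then-fill idiom:

#include <stdlib.h>
#include <string.h>

/* the parser, run twice: with dst == NULL it only measures (the
 * ip_tun_get_optlen() analog); with a buffer it fills it (the
 * ip_tun_set_opts() analog) */
static int parse(const char *src, char *dst)
{
	int len = (int)strlen(src);

	if (dst)
		memcpy(dst, src, (size_t)len);
	return len;
}

static char *build(const char *src)
{
	int len = parse(src, NULL);	/* pass 1: size the allocation */
	char *buf = malloc((size_t)len + 1);

	if (!buf)
		return NULL;
	parse(src, buf);		/* pass 2: fill it */
	buf[len] = '\0';
	return buf;
}

int main(void)
{
	char *s = build("options");

	free(s);
	return 0;
}
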
+
static int ip_tun_build_state(struct nlattr *attr,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack)
{
- struct ip_tunnel_info *tun_info;
- struct lwtunnel_state *new_state;
struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
- int err;
+ struct lwtunnel_state *new_state;
+ struct ip_tunnel_info *tun_info;
+ int err, opt_len;
err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP_MAX, attr,
ip_tun_policy, extack);
if (err < 0)
return err;
- new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+ opt_len = ip_tun_get_optlen(tb[LWTUNNEL_IP_OPTS], extack);
+ if (opt_len < 0)
+ return opt_len;
+
+ new_state = lwtunnel_state_alloc(sizeof(*tun_info) + opt_len);
if (!new_state)
return -ENOMEM;
@@ -242,6 +450,12 @@ static int ip_tun_build_state(struct nlattr *attr,
tun_info = lwt_tun_info(new_state);
+ err = ip_tun_set_opts(tb[LWTUNNEL_IP_OPTS], tun_info, extack);
+ if (err < 0) {
+ lwtstate_free(new_state);
+ return err;
+ }
+
#ifdef CONFIG_DST_CACHE
err = dst_cache_init(&tun_info->dst_cache, GFP_KERNEL);
if (err) {
@@ -266,10 +480,12 @@ static int ip_tun_build_state(struct nlattr *attr,
tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
if (tb[LWTUNNEL_IP_FLAGS])
- tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP_FLAGS]);
+ tun_info->key.tun_flags |=
+ (nla_get_be16(tb[LWTUNNEL_IP_FLAGS]) &
+ ~TUNNEL_OPTIONS_PRESENT);
tun_info->mode = IP_TUNNEL_INFO_TX;
- tun_info->options_len = 0;
+ tun_info->options_len = opt_len;
*ts = new_state;
@@ -285,6 +501,114 @@ static void ip_tun_destroy_state(struct lwtunnel_state *lwtstate)
#endif
}
+static int ip_tun_fill_encap_opts_geneve(struct sk_buff *skb,
+ struct ip_tunnel_info *tun_info)
+{
+ struct geneve_opt *opt;
+ struct nlattr *nest;
+ int offset = 0;
+
+ nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_GENEVE);
+ if (!nest)
+ return -ENOMEM;
+
+ while (tun_info->options_len > offset) {
+ opt = ip_tunnel_info_opts(tun_info) + offset;
+ if (nla_put_be16(skb, LWTUNNEL_IP_OPT_GENEVE_CLASS,
+ opt->opt_class) ||
+ nla_put_u8(skb, LWTUNNEL_IP_OPT_GENEVE_TYPE, opt->type) ||
+ nla_put(skb, LWTUNNEL_IP_OPT_GENEVE_DATA, opt->length * 4,
+ opt->opt_data)) {
+ nla_nest_cancel(skb, nest);
+ return -ENOMEM;
+ }
+ offset += sizeof(*opt) + opt->length * 4;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+}
+
+static int ip_tun_fill_encap_opts_vxlan(struct sk_buff *skb,
+ struct ip_tunnel_info *tun_info)
+{
+ struct vxlan_metadata *md;
+ struct nlattr *nest;
+
+ nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_VXLAN);
+ if (!nest)
+ return -ENOMEM;
+
+ md = ip_tunnel_info_opts(tun_info);
+ if (nla_put_u32(skb, LWTUNNEL_IP_OPT_VXLAN_GBP, md->gbp)) {
+ nla_nest_cancel(skb, nest);
+ return -ENOMEM;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+}
+
+static int ip_tun_fill_encap_opts_erspan(struct sk_buff *skb,
+ struct ip_tunnel_info *tun_info)
+{
+ struct erspan_metadata *md;
+ struct nlattr *nest;
+
+ nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_ERSPAN);
+ if (!nest)
+ return -ENOMEM;
+
+ md = ip_tunnel_info_opts(tun_info);
+ if (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_VER, md->version))
+ goto err;
+
+ if (md->version == 1 &&
+ nla_put_be32(skb, LWTUNNEL_IP_OPT_ERSPAN_INDEX, md->u.index))
+ goto err;
+
+ if (md->version == 2 &&
+ (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_DIR, md->u.md2.dir) ||
+ nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_HWID,
+ get_hwid(&md->u.md2))))
+ goto err;
+
+ nla_nest_end(skb, nest);
+ return 0;
+err:
+ nla_nest_cancel(skb, nest);
+ return -ENOMEM;
+}
+
+static int ip_tun_fill_encap_opts(struct sk_buff *skb, int type,
+ struct ip_tunnel_info *tun_info)
+{
+ struct nlattr *nest;
+ int err = 0;
+
+ if (!(tun_info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
+ return 0;
+
+ nest = nla_nest_start_noflag(skb, type);
+ if (!nest)
+ return -ENOMEM;
+
+ if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT)
+ err = ip_tun_fill_encap_opts_geneve(skb, tun_info);
+ else if (tun_info->key.tun_flags & TUNNEL_VXLAN_OPT)
+ err = ip_tun_fill_encap_opts_vxlan(skb, tun_info);
+ else if (tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)
+ err = ip_tun_fill_encap_opts_erspan(skb, tun_info);
+
+ if (err) {
+ nla_nest_cancel(skb, nest);
+ return err;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+}
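
One note on the flag handling: both ip_tun_build_state() above and its IPv6 counterpart mask TUNNEL_OPTIONS_PRESENT out of the user-supplied flags, because the options-present bits are derived from the parsed LWTUNNEL_IP_OPTS payload rather than trusted from userspace. The mask is presumably just the union of the per-protocol option bits, along these lines:

#define TUNNEL_OPTIONS_PRESENT \
        (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
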
+
static int ip_tun_fill_encap_info(struct sk_buff *skb,
struct lwtunnel_state *lwtstate)
{
@@ -296,12 +620,52 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
- nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
+ nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags) ||
+ ip_tun_fill_encap_opts(skb, LWTUNNEL_IP_OPTS, tun_info))
return -ENOMEM;
return 0;
}
+static int ip_tun_opts_nlsize(struct ip_tunnel_info *info)
+{
+ int opt_len;
+
+ if (!(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
+ return 0;
+
+ opt_len = nla_total_size(0); /* LWTUNNEL_IP_OPTS */
+ if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
+ struct geneve_opt *opt;
+ int offset = 0;
+
+ opt_len += nla_total_size(0); /* LWTUNNEL_IP_OPTS_GENEVE */
+ while (info->options_len > offset) {
+ opt = ip_tunnel_info_opts(info) + offset;
+ opt_len += nla_total_size(2) /* OPT_GENEVE_CLASS */
+ + nla_total_size(1) /* OPT_GENEVE_TYPE */
+ + nla_total_size(opt->length * 4);
+ /* OPT_GENEVE_DATA */
+ offset += sizeof(*opt) + opt->length * 4;
+ }
+ } else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
+ opt_len += nla_total_size(0) /* LWTUNNEL_IP_OPTS_VXLAN */
+ + nla_total_size(4); /* OPT_VXLAN_GBP */
+ } else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
+ struct erspan_metadata *md = ip_tunnel_info_opts(info);
+
+ opt_len += nla_total_size(0) /* LWTUNNEL_IP_OPTS_ERSPAN */
+ + nla_total_size(1) /* OPT_ERSPAN_VER */
+ + (md->version == 1 ? nla_total_size(4)
+ /* OPT_ERSPAN_INDEX (v1) */
+ : nla_total_size(1) +
+ nla_total_size(1));
+ /* OPT_ERSPAN_DIR + HWID (v2) */
+ }
+
+ return opt_len;
+}
+
static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
return nla_total_size_64bit(8) /* LWTUNNEL_IP_ID */
@@ -309,13 +673,21 @@ static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+ nla_total_size(4) /* LWTUNNEL_IP_SRC */
+ nla_total_size(1) /* LWTUNNEL_IP_TOS */
+ nla_total_size(1) /* LWTUNNEL_IP_TTL */
- + nla_total_size(2); /* LWTUNNEL_IP_FLAGS */
+ + nla_total_size(2) /* LWTUNNEL_IP_FLAGS */
+ + ip_tun_opts_nlsize(lwt_tun_info(lwtstate));
+ /* LWTUNNEL_IP_OPTS */
}
static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
- return memcmp(lwt_tun_info(a), lwt_tun_info(b),
- sizeof(struct ip_tunnel_info));
+ struct ip_tunnel_info *info_a = lwt_tun_info(a);
+ struct ip_tunnel_info *info_b = lwt_tun_info(b);
+
+ return memcmp(info_a, info_b, sizeof(info_a->key)) ||
+ info_a->mode != info_b->mode ||
+ info_a->options_len != info_b->options_len ||
+ memcmp(ip_tunnel_info_opts(info_a),
+ ip_tunnel_info_opts(info_b), info_a->options_len);
}
static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
@@ -341,17 +713,21 @@ static int ip6_tun_build_state(struct nlattr *attr,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack)
{
- struct ip_tunnel_info *tun_info;
- struct lwtunnel_state *new_state;
struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
- int err;
+ struct lwtunnel_state *new_state;
+ struct ip_tunnel_info *tun_info;
+ int err, opt_len;
err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP6_MAX, attr,
ip6_tun_policy, extack);
if (err < 0)
return err;
- new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+ opt_len = ip_tun_get_optlen(tb[LWTUNNEL_IP6_OPTS], extack);
+ if (opt_len < 0)
+ return opt_len;
+
+ new_state = lwtunnel_state_alloc(sizeof(*tun_info) + opt_len);
if (!new_state)
return -ENOMEM;
@@ -359,6 +735,12 @@ static int ip6_tun_build_state(struct nlattr *attr,
tun_info = lwt_tun_info(new_state);
+ err = ip_tun_set_opts(tb[LWTUNNEL_IP6_OPTS], tun_info, extack);
+ if (err < 0) {
+ lwtstate_free(new_state);
+ return err;
+ }
+
if (tb[LWTUNNEL_IP6_ID])
tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP6_ID]);
@@ -375,10 +757,12 @@ static int ip6_tun_build_state(struct nlattr *attr,
tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
if (tb[LWTUNNEL_IP6_FLAGS])
- tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]);
+ tun_info->key.tun_flags |=
+ (nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]) &
+ ~TUNNEL_OPTIONS_PRESENT);
tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
- tun_info->options_len = 0;
+ tun_info->options_len = opt_len;
*ts = new_state;
@@ -396,7 +780,8 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb,
nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
- nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
+ nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags) ||
+ ip_tun_fill_encap_opts(skb, LWTUNNEL_IP6_OPTS, tun_info))
return -ENOMEM;
return 0;
@@ -409,7 +794,9 @@ static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+ nla_total_size(16) /* LWTUNNEL_IP6_SRC */
+ nla_total_size(1) /* LWTUNNEL_IP6_HOPLIMIT */
+ nla_total_size(1) /* LWTUNNEL_IP6_TC */
- + nla_total_size(2); /* LWTUNNEL_IP6_FLAGS */
+ + nla_total_size(2) /* LWTUNNEL_IP6_FLAGS */
+ + ip_tun_opts_nlsize(lwt_tun_info(lwtstate));
+ /* LWTUNNEL_IP6_OPTS */
}
static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 32e20b758b68..f35308ff84c3 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1412,6 +1412,9 @@ static int __init wait_for_devices(void)
struct net_device *dev;
int found = 0;
+ /* make sure deferred device probes are finished */
+ wait_for_device_probe();
+
rtnl_lock();
for_each_netdev(&init_net, dev) {
if (ic_is_init_dev(dev)) {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 440294bdb752..6e68def66822 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2291,7 +2291,8 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
rcu_read_unlock();
return -ENODEV;
}
- skb2 = skb_clone(skb, GFP_ATOMIC);
+
+ skb2 = skb_realloc_headroom(skb, sizeof(struct iphdr));
if (!skb2) {
read_unlock(&mrt_lock);
rcu_read_unlock();
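
The ipmr change replaces skb_clone() with skb_realloc_headroom() because ipmr_get_route() goes on to push a fresh IP header in front of the payload: a clone shares its data with the original and guarantees no writable headroom, while skb_realloc_headroom() returns a private copy with at least the requested space. A sketch of the contract:

/* private copy with >= sizeof(struct iphdr) bytes of writable headroom;
 * safe to skb_push() a new header without corrupting the original skb
 */
skb2 = skb_realloc_headroom(skb, sizeof(struct iphdr));
if (!skb2)
        return -ENOMEM;
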
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
index 012c4047c788..e32e41b99f0f 100644
--- a/net/ipv4/netfilter/nf_flow_table_ipv4.c
+++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c
@@ -9,6 +9,8 @@
static struct nf_flowtable_type flowtable_ipv4 = {
.family = NFPROTO_IPV4,
.init = nf_flow_table_init,
+ .setup = nf_flow_table_offload_setup,
+ .action = nf_flow_rule_route_ipv4,
.free = nf_flow_table_free,
.hook = nf_flow_offload_ip_hook,
.owner = THIS_MODULE,
diff --git a/net/ipv4/netfilter/nf_socket_ipv4.c b/net/ipv4/netfilter/nf_socket_ipv4.c
index 36a28d46149c..c94445b44d8c 100644
--- a/net/ipv4/netfilter/nf_socket_ipv4.c
+++ b/net/ipv4/netfilter/nf_socket_ipv4.c
@@ -31,16 +31,8 @@ extract_icmp4_fields(const struct sk_buff *skb, u8 *protocol,
if (icmph == NULL)
return 1;
- switch (icmph->type) {
- case ICMP_DEST_UNREACH:
- case ICMP_SOURCE_QUENCH:
- case ICMP_REDIRECT:
- case ICMP_TIME_EXCEEDED:
- case ICMP_PARAMETERPROB:
- break;
- default:
+ if (!icmp_is_err(icmph->type))
return 1;
- }
inside_iph = skb_header_pointer(skb, outside_hdrlen +
sizeof(struct icmphdr),
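
This hunk, the route.c one below and the nf_conntrack_proto_icmp.c one further down all fold the same open-coded ICMP type checks into a shared icmp_is_err() helper. Its definition is not part of these hunks; presumably it reads roughly:

static inline bool icmp_is_err(int type)
{
        switch (type) {
        case ICMP_DEST_UNREACH:
        case ICMP_SOURCE_QUENCH:
        case ICMP_REDIRECT:
        case ICMP_TIME_EXCEEDED:
        case ICMP_PARAMETERPROB:
                return true;
        }

        return false;
}
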
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 621f83434b24..dcc4fa10138d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1894,10 +1894,7 @@ static void ip_multipath_l3_keys(const struct sk_buff *skb,
if (!icmph)
goto out;
- if (icmph->type != ICMP_DEST_UNREACH &&
- icmph->type != ICMP_REDIRECT &&
- icmph->type != ICMP_TIME_EXCEEDED &&
- icmph->type != ICMP_PARAMETERPROB)
+ if (!icmp_is_err(icmph->type))
goto out;
inner_iph = skb_header_pointer(skb,
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 535b69326f66..345b2b0ff618 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -62,10 +62,10 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
* Since subsequent timestamps use the normal tcp_time_stamp value, we
* must make sure that the resulting initial timestamp is <= tcp_time_stamp.
*/
-u64 cookie_init_timestamp(struct request_sock *req)
+u64 cookie_init_timestamp(struct request_sock *req, u64 now)
{
struct inet_request_sock *ireq;
- u32 ts, ts_now = tcp_time_stamp_raw();
+ u32 ts, ts_now = tcp_ns_to_ts(now);
u32 options = 0;
ireq = inet_rsk(req);
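
cookie_init_timestamp() now receives the nanosecond clock sample from its caller instead of reading the clock a second time via tcp_time_stamp_raw(); the tcp_output.c hunk below passes the same `now` that stamps the SYN-ACK, so the cookie timestamp and the skb timestamp can no longer disagree. tcp_ns_to_ts() is assumed to be the obvious conversion to TCP timestamp ticks:

static inline u32 tcp_ns_to_ts(u64 ns)
{
        return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
}
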
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1dd25189d83f..9b48aec29aca 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1958,8 +1958,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
struct sk_buff *skb, *last;
u32 urg_hole = 0;
struct scm_timestamping_internal tss;
- bool has_tss = false;
- bool has_cmsg;
+ int cmsg_flags;
if (unlikely(flags & MSG_ERRQUEUE))
return inet_recv_error(sk, msg, len, addr_len);
@@ -1974,7 +1973,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
if (sk->sk_state == TCP_LISTEN)
goto out;
- has_cmsg = tp->recvmsg_inq;
+ cmsg_flags = tp->recvmsg_inq ? 1 : 0;
timeo = sock_rcvtimeo(sk, nonblock);
/* Urgent data needs to be handled specially. */
@@ -2047,7 +2046,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
/* Well, if we have backlog, try to process it now yet. */
- if (copied >= target && !sk->sk_backlog.tail)
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break;
if (copied) {
@@ -2157,8 +2156,7 @@ skip_copy:
if (TCP_SKB_CB(skb)->has_rxtstamp) {
tcp_update_recv_tstamps(skb, &tss);
- has_tss = true;
- has_cmsg = true;
+ cmsg_flags |= 2;
}
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto found_fin_ok;
@@ -2183,10 +2181,10 @@ found_fin_ok:
release_sock(sk);
- if (has_cmsg) {
- if (has_tss)
+ if (cmsg_flags) {
+ if (cmsg_flags & 2)
tcp_recv_timestamp(msg, sk, &tss);
- if (tp->recvmsg_inq) {
+ if (cmsg_flags & 1) {
inq = tcp_inq_hint(sk);
put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
}
@@ -3225,8 +3223,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
* tcpi_unacked -> Number of children ready for accept()
* tcpi_sacked -> max backlog
*/
- info->tcpi_unacked = sk->sk_ack_backlog;
- info->tcpi_sacked = sk->sk_max_ack_backlog;
+ info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog);
+ info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog);
return;
}
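
In the tcp_recvmsg() hunks above, the two bools collapse into a single cmsg_flags bitmask: bit 0 records that TCP_CM_INQ reporting was requested when the call started, bit 1 that receive timestamps were collected along the way. Besides saving stack space, sampling tp->recvmsg_inq once at entry means a concurrent setsockopt() cannot change the answer between the copy loop and the cmsg emission after release_sock(). Illustrative names for the two bits (the code itself uses the bare constants 1 and 2):

#define TCP_CMSG_INQ 1  /* emit the TCP_CM_INQ cmsg */
#define TCP_CMSG_TS  2  /* emit the receive-timestamp cmsg */
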
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 549506162dde..0d08f9e2d8d0 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -21,8 +21,8 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
struct tcp_info *info = _info;
if (inet_sk_state_load(sk) == TCP_LISTEN) {
- r->idiag_rqueue = sk->sk_ack_backlog;
- r->idiag_wqueue = sk->sk_max_ack_backlog;
+ r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
+ r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
} else if (sk->sk_type == SOCK_STREAM) {
const struct tcp_sock *tp = tcp_sk(sk);
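
This hunk and the neighbouring tcp ones belong to a series annotating lockless readers of sk_ack_backlog and sk_max_ack_backlog: diag and /proc paths read the counters without the socket lock, so the loads need READ_ONCE() paired with WRITE_ONCE() on the update side. The accept-queue helpers presumably become:

static inline void sk_acceptq_removed(struct sock *sk)
{
        WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
}

static inline void sk_acceptq_added(struct sock *sk)
{
        WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
}

The af_llc.c hunk further down switches its open-coded decrement to sk_acceptq_removed() for the same reason.
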
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 899e100a68e6..92282f98dc82 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2451,7 +2451,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
state = inet_sk_state_load(sk);
if (state == TCP_LISTEN)
- rx_queue = sk->sk_ack_backlog;
+ rx_queue = READ_ONCE(sk->sk_ack_backlog);
else
/* Because we don't lock the socket,
* we might find a transient negative value.
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0488607c5cd3..be6d22b8190f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3290,7 +3290,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
now = tcp_clock_ns();
#ifdef CONFIG_SYN_COOKIES
if (unlikely(req->cookie_ts))
- skb->skb_mstamp_ns = cookie_init_timestamp(req);
+ skb->skb_mstamp_ns = cookie_init_timestamp(req, now);
else
#endif
{
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1d58ce829dca..da805ee7558b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2534,9 +2534,11 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
case UDP_ENCAP:
switch (val) {
case 0:
+#ifdef CONFIG_XFRM
case UDP_ENCAP_ESPINUDP:
case UDP_ENCAP_ESPINUDP_NON_IKE:
up->encap_rcv = xfrm4_udp_encap_rcv;
+#endif
/* FALLTHROUGH */
case UDP_ENCAP_L2TPINUDP:
up->encap_type = val;
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index ecff3fce9807..89ba7c87de5d 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -92,7 +92,7 @@ static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, skb_dst(skb)->dev,
+ net, sk, skb, skb->dev, skb_dst(skb)->dev,
__xfrm4_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
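
Here, and in the ip6_output and xfrm6_output hunks below, passing skb->dev instead of NULL as the input device lets NF_INET_POST_ROUTING hooks on these paths see which interface the packet originally arrived on. A hook reads it from the hook state as usual; a hypothetical example:

static unsigned int log_ingress(void *priv, struct sk_buff *skb,
                                const struct nf_hook_state *state)
{
        /* state->in was always NULL on this path before this change */
        if (state->in)
                pr_debug("ingress dev %s\n", state->in->name);

        return NF_ACCEPT;
}
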
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 71827b56c006..945508a7cb0f 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -160,7 +160,7 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct net_device *dev = skb_dst(skb)->dev;
+ struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
skb->protocol = htons(ETH_P_IPV6);
@@ -173,7 +173,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
}
return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, dev,
+ net, sk, skb, indev, dev,
ip6_finish_output,
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c
index f6d9a48c7a2a..a8566ee12e83 100644
--- a/net/ipv6/netfilter/nf_flow_table_ipv6.c
+++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
@@ -10,6 +10,8 @@
static struct nf_flowtable_type flowtable_ipv6 = {
.family = NFPROTO_IPV6,
.init = nf_flow_table_init,
+ .setup = nf_flow_table_offload_setup,
+ .action = nf_flow_rule_route_ipv6,
.free = nf_flow_table_free,
.hook = nf_flow_offload_ipv6_hook,
.owner = THIS_MODULE,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a63ff85fe141..edcb52543518 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -621,6 +621,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
{
struct __rt6_probe_work *work = NULL;
const struct in6_addr *nh_gw;
+ unsigned long last_probe;
struct neighbour *neigh;
struct net_device *dev;
struct inet6_dev *idev;
@@ -639,6 +640,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
nh_gw = &fib6_nh->fib_nh_gw6;
dev = fib6_nh->fib_nh_dev;
rcu_read_lock_bh();
+ last_probe = READ_ONCE(fib6_nh->last_probe);
idev = __in6_dev_get(dev);
neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
if (neigh) {
@@ -654,13 +656,15 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
__neigh_set_probe_once(neigh);
}
write_unlock(&neigh->lock);
- } else if (time_after(jiffies, fib6_nh->last_probe +
+ } else if (time_after(jiffies, last_probe +
idev->cnf.rtr_probe_interval)) {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
}
- if (work) {
- fib6_nh->last_probe = jiffies;
+ if (!work || cmpxchg(&fib6_nh->last_probe,
+ last_probe, jiffies) != last_probe) {
+ kfree(work);
+ } else {
INIT_WORK(&work->work, rt6_probe_deferred);
work->target = *nh_gw;
dev_hold(dev);
@@ -1475,11 +1479,11 @@ static u32 rt6_exception_hash(const struct in6_addr *dst,
u32 val;
net_get_random_once(&seed, sizeof(seed));
- val = jhash(dst, sizeof(*dst), seed);
+ val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed);
#ifdef CONFIG_IPV6_SUBTREES
if (src)
- val = jhash(src, sizeof(*src), val);
+ val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val);
#endif
return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}
@@ -2291,10 +2295,7 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb,
if (!icmph)
goto out;
- if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
- icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
- icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
- icmph->icmp6_type != ICMPV6_PARAMPROB)
+ if (!icmpv6_is_err(icmph->icmp6_type))
goto out;
inner_iph = skb_header_pointer(skb,
@@ -3383,6 +3384,9 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
int err;
fib6_nh->fib_nh_family = AF_INET6;
+#ifdef CONFIG_IPV6_ROUTER_PREF
+ fib6_nh->last_probe = jiffies;
+#endif
err = -ENODEV;
if (cfg->fc_ifindex) {
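
Three independent fixes land in ipv6/route.c. rt6_probe() previously let concurrent CPUs each read a stale fib6_nh->last_probe, allocate duplicate probe work and overwrite the timestamp; sampling it once with READ_ONCE() and publishing the update with cmpxchg() lets exactly one CPU win while the losers free their work item (fib6_nh_init() now also seeds last_probe so the first comparison has a sane base). The jhash2() switch exploits the fact that an in6_addr is exactly four 32-bit words, and icmpv6_is_err() mirrors the new icmp_is_err() helper on the IPv4 side. The race-fix pattern, reduced to a sketch:

unsigned long last = READ_ONCE(nh->last_probe);

/* ... decide that a probe is due, allocate the work item ... */
if (!work || cmpxchg(&nh->last_probe, last, jiffies) != last)
        kfree(work);    /* lost the race (or OOM): someone else probes */
else
        schedule_work(&work->work);
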
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 9d4f75e0d33a..e70567446f28 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -81,6 +81,11 @@ static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb)
if (!pskb_may_pull(skb, srhoff + len))
return NULL;
+ /* note that pskb_may_pull may change pointers in header;
+ * for this reason it is necessary to reload them when needed.
+ */
+ srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+
if (!seg6_validate_srh(srh, len))
return NULL;
@@ -336,6 +341,8 @@ static int input_action_end_dx6(struct sk_buff *skb,
if (!ipv6_addr_any(&slwt->nh6))
nhaddr = &slwt->nh6;
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+
seg6_lookup_nexthop(skb, nhaddr, 0);
return dst_input(skb);
@@ -365,6 +372,8 @@ static int input_action_end_dx4(struct sk_buff *skb,
skb_dst_drop(skb);
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+
err = ip_route_input(skb, nhaddr, iph->saddr, 0, skb->dev);
if (err)
goto drop;
@@ -385,6 +394,8 @@ static int input_action_end_dt6(struct sk_buff *skb,
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto drop;
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+
seg6_lookup_nexthop(skb, NULL, slwt->table);
return dst_input(skb);
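
The get_srh() change fixes the classic pskb_may_pull() pitfall: a successful pull may reallocate the skb head, invalidating any header pointer computed beforehand, so the pointer must be recomputed from skb->data afterwards:

if (!pskb_may_pull(skb, srhoff + len))
        return NULL;

/* skb->data may have moved: recompute, never reuse the old pointer */
srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);

The added skb_set_transport_header() calls in the End.DX4/DX6/DT6 actions likewise repoint the transport header at the inner packet before handing it back to the stack.
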
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4804b6dc5e65..81f51335e326 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1891,7 +1891,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
state = inet_sk_state_load(sp);
if (state == TCP_LISTEN)
- rx_queue = sp->sk_ack_backlog;
+ rx_queue = READ_ONCE(sp->sk_ack_backlog);
else
/* Because we don't lock the socket,
* we might find a transient negative value.
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index eecac1b7148e..fbe51d40bd7e 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -187,7 +187,7 @@ skip_frag:
int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, skb_dst(skb)->dev,
+ net, sk, skb, skb->dev, skb_dst(skb)->dev,
__xfrm6_output,
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index c74f44dfaa22..2922d4150d88 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -705,7 +705,7 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags,
/* put original socket back into a clean listen state. */
sk->sk_state = TCP_LISTEN;
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
dprintk("%s: ok success on %02X, client on %02X\n", __func__,
llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap);
frees:
@@ -780,7 +780,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
}
/* Well, if we have backlog, try to process it now yet. */
- if (copied >= target && !sk->sk_backlog.tail)
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break;
if (copied) {
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 70739e746c13..4fb7f1f12109 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3428,7 +3428,7 @@ int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
spin_lock_irqsave(&local->ack_status_lock, spin_flags);
id = idr_alloc(&local->ack_status_frames, ack_skb,
- 1, 0x10000, GFP_ATOMIC);
+ 1, 0x40, GFP_ATOMIC);
spin_unlock_irqrestore(&local->ack_status_lock, spin_flags);
if (id < 0) {
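
idr_alloc(idr, ptr, start, end, gfp) hands out an id in the half-open range [start, end), so the new bound caps in-flight ack-status frames at 63; presumably the id now has to fit into a much narrower field of the tx control block than the old 16-bit space. The companion net/mac80211/tx.c hunk below uses the same bound:

/* ids come from [1, 0x40); 0 stays reserved for "no ack id" */
id = idr_alloc(&local->ack_status_frames, ack_skb, 1, 0x40, GFP_ATOMIC);
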
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index aba094b4ccfc..2d05c4cfaf6d 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1292,8 +1292,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
ieee80211_remove_interfaces(local);
fail_rate:
rtnl_unlock();
- ieee80211_led_exit(local);
fail_flows:
+ ieee80211_led_exit(local);
destroy_workqueue(local->workqueue);
fail_workqueue:
wiphy_unregister(local->hw.wiphy);
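
The main.c fix is purely about unwind ordering: the jump to fail_flows happens after the LEDs were initialized, so ieee80211_led_exit() must sit below that label to run on that path too. The general shape of goto-based unwind, with hypothetical names for illustration:

        err = setup_a();
        if (err)
                goto fail_a;
        err = setup_b();
        if (err)
                goto fail_b;    /* a succeeded and must be undone */
        return 0;

fail_b:
        teardown_a();           /* reached from every later failure */
fail_a:
        return err;
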
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 54dd8849d1cc..5fa13176036f 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3186,15 +3186,14 @@ static int ieee80211_recalc_twt_req(struct ieee80211_sub_if_data *sdata,
static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss,
- struct ieee80211_mgmt *mgmt, size_t len)
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct ieee802_11_elems *elems)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
struct sta_info *sta;
- u8 *pos;
u16 capab_info, aid;
- struct ieee802_11_elems elems;
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
const struct cfg80211_bss_ies *bss_ies = NULL;
struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
@@ -3222,19 +3221,15 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
ifmgd->broken_ap = true;
}
- pos = mgmt->u.assoc_resp.variable;
- ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
- mgmt->bssid, assoc_data->bss->bssid);
-
- if (!elems.supp_rates) {
+ if (!elems->supp_rates) {
sdata_info(sdata, "no SuppRates element in AssocResp\n");
return false;
}
ifmgd->aid = aid;
ifmgd->tdls_chan_switch_prohibited =
- elems.ext_capab && elems.ext_capab_len >= 5 &&
- (elems.ext_capab[4] & WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED);
+ elems->ext_capab && elems->ext_capab_len >= 5 &&
+ (elems->ext_capab[4] & WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED);
/*
* Some APs are erroneously not including some information in their
@@ -3243,11 +3238,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
* 2G/3G/4G wifi routers, reported models include the "Onda PN51T",
* "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device.
*/
- if ((assoc_data->wmm && !elems.wmm_param) ||
+ if ((assoc_data->wmm && !elems->wmm_param) ||
(!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
- (!elems.ht_cap_elem || !elems.ht_operation)) ||
+ (!elems->ht_cap_elem || !elems->ht_operation)) ||
(!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
- (!elems.vht_cap_elem || !elems.vht_operation))) {
+ (!elems->vht_cap_elem || !elems->vht_operation))) {
const struct cfg80211_bss_ies *ies;
struct ieee802_11_elems bss_elems;
@@ -3265,8 +3260,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
mgmt->bssid,
assoc_data->bss->bssid);
if (assoc_data->wmm &&
- !elems.wmm_param && bss_elems.wmm_param) {
- elems.wmm_param = bss_elems.wmm_param;
+ !elems->wmm_param && bss_elems.wmm_param) {
+ elems->wmm_param = bss_elems.wmm_param;
sdata_info(sdata,
"AP bug: WMM param missing from AssocResp\n");
}
@@ -3275,27 +3270,27 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
* Also check if we requested HT/VHT, otherwise the AP doesn't
* have to include the IEs in the (re)association response.
*/
- if (!elems.ht_cap_elem && bss_elems.ht_cap_elem &&
+ if (!elems->ht_cap_elem && bss_elems.ht_cap_elem &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
- elems.ht_cap_elem = bss_elems.ht_cap_elem;
+ elems->ht_cap_elem = bss_elems.ht_cap_elem;
sdata_info(sdata,
"AP bug: HT capability missing from AssocResp\n");
}
- if (!elems.ht_operation && bss_elems.ht_operation &&
+ if (!elems->ht_operation && bss_elems.ht_operation &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
- elems.ht_operation = bss_elems.ht_operation;
+ elems->ht_operation = bss_elems.ht_operation;
sdata_info(sdata,
"AP bug: HT operation missing from AssocResp\n");
}
- if (!elems.vht_cap_elem && bss_elems.vht_cap_elem &&
+ if (!elems->vht_cap_elem && bss_elems.vht_cap_elem &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
- elems.vht_cap_elem = bss_elems.vht_cap_elem;
+ elems->vht_cap_elem = bss_elems.vht_cap_elem;
sdata_info(sdata,
"AP bug: VHT capa missing from AssocResp\n");
}
- if (!elems.vht_operation && bss_elems.vht_operation &&
+ if (!elems->vht_operation && bss_elems.vht_operation &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
- elems.vht_operation = bss_elems.vht_operation;
+ elems->vht_operation = bss_elems.vht_operation;
sdata_info(sdata,
"AP bug: VHT operation missing from AssocResp\n");
}
@@ -3306,7 +3301,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
* they should be present here. This is just a safety net.
*/
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
- (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) {
+ (!elems->wmm_param || !elems->ht_cap_elem || !elems->ht_operation)) {
sdata_info(sdata,
"HT AP is missing WMM params or HT capability/operation\n");
ret = false;
@@ -3314,7 +3309,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
- (!elems.vht_cap_elem || !elems.vht_operation)) {
+ (!elems->vht_cap_elem || !elems->vht_operation)) {
sdata_info(sdata,
"VHT AP is missing VHT capability/operation\n");
ret = false;
@@ -3341,7 +3336,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
- (!elems.he_cap || !elems.he_operation)) {
+ (!elems->he_cap || !elems->he_operation)) {
mutex_unlock(&sdata->local->sta_mtx);
sdata_info(sdata,
"HE AP is missing HE capability/operation\n");
@@ -3350,23 +3345,23 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
/* Set up internal HT/VHT capabilities */
- if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
+ if (elems->ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
- elems.ht_cap_elem, sta);
+ elems->ht_cap_elem, sta);
- if (elems.vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
+ if (elems->vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
- elems.vht_cap_elem, sta);
+ elems->vht_cap_elem, sta);
- if (elems.he_operation && !(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
- elems.he_cap) {
+ if (elems->he_operation && !(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
+ elems->he_cap) {
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
- elems.he_cap,
- elems.he_cap_len,
+ elems->he_cap,
+ elems->he_cap_len,
sta);
bss_conf->he_support = sta->sta.he_cap.has_he;
- changed |= ieee80211_recalc_twt_req(sdata, sta, &elems);
+ changed |= ieee80211_recalc_twt_req(sdata, sta, elems);
} else {
bss_conf->he_support = false;
bss_conf->twt_requester = false;
@@ -3374,14 +3369,14 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
if (bss_conf->he_support) {
bss_conf->bss_color =
- le32_get_bits(elems.he_operation->he_oper_params,
+ le32_get_bits(elems->he_operation->he_oper_params,
IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
bss_conf->htc_trig_based_pkt_ext =
- le32_get_bits(elems.he_operation->he_oper_params,
+ le32_get_bits(elems->he_operation->he_oper_params,
IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK);
bss_conf->frame_time_rts_th =
- le32_get_bits(elems.he_operation->he_oper_params,
+ le32_get_bits(elems->he_operation->he_oper_params,
IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK);
bss_conf->multi_sta_back_32bit =
@@ -3392,12 +3387,12 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
sta->sta.he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_ACK_EN;
- bss_conf->uora_exists = !!elems.uora_element;
- if (elems.uora_element)
- bss_conf->uora_ocw_range = elems.uora_element[0];
+ bss_conf->uora_exists = !!elems->uora_element;
+ if (elems->uora_element)
+ bss_conf->uora_ocw_range = elems->uora_element[0];
- ieee80211_he_op_ie_to_bss_conf(&sdata->vif, elems.he_operation);
- ieee80211_he_spr_ie_to_bss_conf(&sdata->vif, elems.he_spr);
+ ieee80211_he_op_ie_to_bss_conf(&sdata->vif, elems->he_operation);
+ ieee80211_he_spr_ie_to_bss_conf(&sdata->vif, elems->he_spr);
/* TODO: OPEN: what happens if BSS color disable is set? */
}
@@ -3421,11 +3416,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
* NSS calculation (that would be done in rate_control_rate_init())
* and use the # of streams from that element.
*/
- if (elems.opmode_notif &&
- !(*elems.opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)) {
+ if (elems->opmode_notif &&
+ !(*elems->opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)) {
u8 nss;
- nss = *elems.opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
+ nss = *elems->opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
nss += 1;
sta->sta.rx_nss = nss;
@@ -3440,7 +3435,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
sta->sta.mfp = false;
}
- sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
+ sta->sta.wme = elems->wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
@@ -3468,9 +3463,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
if (ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
ieee80211_set_wmm_default(sdata, false, false);
- } else if (!ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
- elems.wmm_param_len,
- elems.mu_edca_param_set)) {
+ } else if (!ieee80211_sta_wmm_params(local, sdata, elems->wmm_param,
+ elems->wmm_param_len,
+ elems->mu_edca_param_set)) {
/* still enable QoS since we might have HT/VHT */
ieee80211_set_wmm_default(sdata, false, true);
/* set the disable-WMM flag in this case to disable
@@ -3484,11 +3479,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
changed |= BSS_CHANGED_QOS;
- if (elems.max_idle_period_ie) {
+ if (elems->max_idle_period_ie) {
bss_conf->max_idle_period =
- le16_to_cpu(elems.max_idle_period_ie->max_idle_period);
+ le16_to_cpu(elems->max_idle_period_ie->max_idle_period);
bss_conf->protected_keep_alive =
- !!(elems.max_idle_period_ie->idle_options &
+ !!(elems->max_idle_period_ie->idle_options &
WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE);
changed |= BSS_CHANGED_KEEP_ALIVE;
} else {
@@ -3598,7 +3593,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
event.u.mlme.reason = status_code;
drv_event_callback(sdata->local, sdata, &event);
} else {
- if (!ieee80211_assoc_success(sdata, bss, mgmt, len)) {
+ if (!ieee80211_assoc_success(sdata, bss, mgmt, len, &elems)) {
/* oops -- internal error -- send timeout for now */
ieee80211_destroy_assoc_data(sdata, false, false);
cfg80211_assoc_timeout(sdata->dev, bss);
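
ieee80211_assoc_success() no longer parses the association response itself; the caller parses once and passes the result down, so the sizable struct ieee802_11_elems lives on one stack frame instead of two and the IEs are not parsed twice. Judging from the removed lines and the new call site, the caller-side shape is presumably:

struct ieee802_11_elems elems;
u8 *pos = mgmt->u.assoc_resp.variable;

ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
                       mgmt->bssid, assoc_data->bss->bssid);

if (!ieee80211_assoc_success(sdata, bss, mgmt, len, &elems)) {
        /* ... timeout path as above ... */
}
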
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index bd11fef2139f..8d3a2389b055 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2457,7 +2457,8 @@ unsigned long ieee80211_sta_last_active(struct sta_info *sta)
{
struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);
- if (time_after(stats->last_rx, sta->status_stats.last_ack))
+ if (!sta->status_stats.last_ack ||
+ time_after(stats->last_rx, sta->status_stats.last_ack))
return stats->last_rx;
return sta->status_stats.last_ack;
}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 938c10f7955b..db38be1b75fa 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2417,6 +2417,33 @@ static int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static int ieee80211_store_ack_skb(struct ieee80211_local *local,
+ struct sk_buff *skb,
+ u32 *info_flags)
+{
+ struct sk_buff *ack_skb = skb_clone_sk(skb);
+ u16 info_id = 0;
+
+ if (ack_skb) {
+ unsigned long flags;
+ int id;
+
+ spin_lock_irqsave(&local->ack_status_lock, flags);
+ id = idr_alloc(&local->ack_status_frames, ack_skb,
+ 1, 0x40, GFP_ATOMIC);
+ spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+ if (id >= 0) {
+ info_id = id;
+ *info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ } else {
+ kfree_skb(ack_skb);
+ }
+ }
+
+ return info_id;
+}
+
/**
* ieee80211_build_hdr - build 802.11 header in the given frame
* @sdata: virtual interface to build the header for
@@ -2710,26 +2737,8 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
}
if (unlikely(!multicast && skb->sk &&
- skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) {
- struct sk_buff *ack_skb = skb_clone_sk(skb);
-
- if (ack_skb) {
- unsigned long flags;
- int id;
-
- spin_lock_irqsave(&local->ack_status_lock, flags);
- id = idr_alloc(&local->ack_status_frames, ack_skb,
- 1, 0x10000, GFP_ATOMIC);
- spin_unlock_irqrestore(&local->ack_status_lock, flags);
-
- if (id >= 0) {
- info_id = id;
- info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- } else {
- kfree_skb(ack_skb);
- }
- }
- }
+ skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
+ info_id = ieee80211_store_ack_skb(local, skb, &info_flags);
/*
* If the skb is shared we need to obtain our own copy.
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 4fc075b612fe..5e9b2eb24349 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -120,7 +120,8 @@ obj-$(CONFIG_NFT_FWD_NETDEV) += nft_fwd_netdev.o
# flow table infrastructure
obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o
-nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o
+nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o \
+ nf_flow_table_offload.o
obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 35cf59e4004b..169e0a04f814 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -296,7 +296,8 @@ ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr)
if (unlikely(!flag_nested(nla)))
return -IPSET_ERR_PROTOCOL;
- if (nla_parse_nested_deprecated(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy, NULL))
+ if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
+ ipaddr_policy, NULL))
return -IPSET_ERR_PROTOCOL;
if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
return -IPSET_ERR_PROTOCOL;
@@ -314,7 +315,8 @@ ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
if (unlikely(!flag_nested(nla)))
return -IPSET_ERR_PROTOCOL;
- if (nla_parse_nested_deprecated(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy, NULL))
+ if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
+ ipaddr_policy, NULL))
return -IPSET_ERR_PROTOCOL;
if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
return -IPSET_ERR_PROTOCOL;
@@ -1100,7 +1102,8 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
/* Without holding any locks, create private part. */
if (attr[IPSET_ATTR_DATA] &&
- nla_parse_nested_deprecated(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA], set->type->create_policy, NULL)) {
+ nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
+ set->type->create_policy, NULL)) {
ret = -IPSET_ERR_PROTOCOL;
goto put_out;
}
@@ -1471,6 +1474,14 @@ dump_attrs(struct nlmsghdr *nlh)
}
}
+static const struct nla_policy
+ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+ [IPSET_ATTR_FLAGS] = { .type = NLA_U32 },
+};
+
static int
dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
{
@@ -1482,9 +1493,9 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
ip_set_id_t index;
int ret;
- ret = nla_parse_deprecated(cda, IPSET_ATTR_CMD_MAX, attr,
- nlh->nlmsg_len - min_len,
- ip_set_setname_policy, NULL);
+ ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr,
+ nlh->nlmsg_len - min_len,
+ ip_set_dump_policy, NULL);
if (ret)
return ret;
@@ -1733,9 +1744,9 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
cmdattr = (void *)&errmsg->msg + min_len;
- ret = nla_parse_deprecated(cda, IPSET_ATTR_CMD_MAX, cmdattr,
- nlh->nlmsg_len - min_len,
- ip_set_adt_policy, NULL);
+ ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, cmdattr,
+ nlh->nlmsg_len - min_len, ip_set_adt_policy,
+ NULL);
if (ret) {
nlmsg_free(skb2);
@@ -1786,7 +1797,9 @@ static int ip_set_ad(struct net *net, struct sock *ctnl,
use_lineno = !!attr[IPSET_ATTR_LINENO];
if (attr[IPSET_ATTR_DATA]) {
- if (nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL))
+ if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
+ attr[IPSET_ATTR_DATA],
+ set->type->adt_policy, NULL))
return -IPSET_ERR_PROTOCOL;
ret = call_ad(ctnl, skb, set, tb, adt, flags,
use_lineno);
@@ -1796,7 +1809,8 @@ static int ip_set_ad(struct net *net, struct sock *ctnl,
nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
if (nla_type(nla) != IPSET_ATTR_DATA ||
!flag_nested(nla) ||
- nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, nla, set->type->adt_policy, NULL))
+ nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
+ set->type->adt_policy, NULL))
return -IPSET_ERR_PROTOCOL;
ret = call_ad(ctnl, skb, set, tb, adt,
flags, use_lineno);
@@ -1845,7 +1859,8 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
if (!set)
return -ENOENT;
- if (nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL))
+ if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
+ set->type->adt_policy, NULL))
return -IPSET_ERR_PROTOCOL;
rcu_read_lock_bh();
@@ -2151,7 +2166,7 @@ static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
[IPSET_CMD_LIST] = {
.call = ip_set_dump,
.attr_count = IPSET_ATTR_CMD_MAX,
- .policy = ip_set_setname_policy,
+ .policy = ip_set_dump_policy,
},
[IPSET_CMD_SAVE] = {
.call = ip_set_dump,
@@ -2259,8 +2274,9 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
}
req_version->version = IPSET_PROTOCOL;
- ret = copy_to_user(user, req_version,
- sizeof(struct ip_set_req_version));
+ if (copy_to_user(user, req_version,
+ sizeof(struct ip_set_req_version)))
+ ret = -EFAULT;
goto done;
}
case IP_SET_OP_GET_BYNAME: {
@@ -2319,7 +2335,8 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
} /* end of switch(op) */
copy:
- ret = copy_to_user(user, data, copylen);
+ if (copy_to_user(user, data, copylen))
+ ret = -EFAULT;
done:
vfree(data);
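
Two distinct fixes in ip_set_core.c. The nla_parse_nested_deprecated() to nla_parse_nested() conversions, together with the dedicated ip_set_dump_policy (which admits IPSET_ATTR_FLAGS for dumps), move these command paths to strict netlink validation: unknown attributes or a missing NLA_F_NESTED flag are now rejected instead of silently ignored. Separately, ip_set_sockfn_get() was returning the raw result of copy_to_user(), which is the number of bytes left uncopied rather than an errno; the correct idiom:

/* copy_to_user() returns bytes NOT copied, never an error code */
if (copy_to_user(user, data, copylen))
        ret = -EFAULT;
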
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index e28cd72db6ad..eceb7bc4a93a 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -209,7 +209,7 @@ hash_ipmac6_kadt(struct ip_set *set, const struct sk_buff *skb,
(skb_mac_header(skb) + ETH_HLEN) > skb->data)
return -EINVAL;
- if (opt->flags & IPSET_DIM_ONE_SRC)
+ if (opt->flags & IPSET_DIM_TWO_SRC)
ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
else
ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 86133fae4b69..136cf0781d3a 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -368,6 +368,7 @@ static struct ip_set_type hash_net_type __read_mostly = {
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 1a04e0929738..be5e95a0d876 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -25,7 +25,8 @@
/* 3 Counters support added */
/* 4 Comments support added */
/* 5 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 6 /* skbinfo support added */
+/* 6 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 7 /* interface wildcard support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -57,6 +58,7 @@ struct hash_netiface4_elem {
u8 cidr;
u8 nomatch;
u8 elem;
+ u8 wildcard;
char iface[IFNAMSIZ];
};
@@ -71,7 +73,9 @@ hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1,
ip1->cidr == ip2->cidr &&
(++*multi) &&
ip1->physdev == ip2->physdev &&
- strcmp(ip1->iface, ip2->iface) == 0;
+ (ip1->wildcard ?
+ strncmp(ip1->iface, ip2->iface, strlen(ip1->iface)) == 0 :
+ strcmp(ip1->iface, ip2->iface) == 0);
}
static int
@@ -103,7 +107,8 @@ static bool
hash_netiface4_data_list(struct sk_buff *skb,
const struct hash_netiface4_elem *data)
{
- u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+ u32 flags = (data->physdev ? IPSET_FLAG_PHYSDEV : 0) |
+ (data->wildcard ? IPSET_FLAG_IFACE_WILDCARD : 0);
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
@@ -229,6 +234,8 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
e.physdev = 1;
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
+ if (cadt_flags & IPSET_FLAG_IFACE_WILDCARD)
+ e.wildcard = 1;
}
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
e.ip = htonl(ip & ip_set_hostmask(e.cidr));
@@ -280,6 +287,7 @@ struct hash_netiface6_elem {
u8 cidr;
u8 nomatch;
u8 elem;
+ u8 wildcard;
char iface[IFNAMSIZ];
};
@@ -294,7 +302,9 @@ hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1,
ip1->cidr == ip2->cidr &&
(++*multi) &&
ip1->physdev == ip2->physdev &&
- strcmp(ip1->iface, ip2->iface) == 0;
+ (ip1->wildcard ?
+ strncmp(ip1->iface, ip2->iface, strlen(ip1->iface)) == 0 :
+ strcmp(ip1->iface, ip2->iface) == 0);
}
static int
@@ -326,7 +336,8 @@ static bool
hash_netiface6_data_list(struct sk_buff *skb,
const struct hash_netiface6_elem *data)
{
- u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+ u32 flags = (data->physdev ? IPSET_FLAG_PHYSDEV : 0) |
+ (data->wildcard ? IPSET_FLAG_IFACE_WILDCARD : 0);
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
@@ -440,6 +451,8 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
e.physdev = 1;
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
+ if (cadt_flags & IPSET_FLAG_IFACE_WILDCARD)
+ e.wildcard = 1;
}
ret = adtfn(set, &e, &ext, &ext, flags);
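
The new wildcard flag turns the stored interface name into a prefix pattern, so an element added for "eth" matches eth0, eth1 and so on. Pulled out of the data_equal() callbacks above, the comparison amounts to this (hypothetical helper, for illustration):

static bool iface_matches(const char *pattern, bool wildcard,
                          const char *iface)
{
        if (wildcard)   /* prefix match: "eth" matches "eth0" */
                return strncmp(pattern, iface, strlen(pattern)) == 0;

        return strcmp(pattern, iface) == 0;
}
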
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index bcb6d0b4db36..da4ef910b12d 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -476,6 +476,7 @@ static struct ip_set_type hash_netnet_type __read_mostly = {
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index 097deba7441a..c2e3dff773bc 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -235,11 +235,7 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
}
/* Need to track icmp error message? */
- if (icmph->type != ICMP_DEST_UNREACH &&
- icmph->type != ICMP_SOURCE_QUENCH &&
- icmph->type != ICMP_TIME_EXCEEDED &&
- icmph->type != ICMP_PARAMETERPROB &&
- icmph->type != ICMP_REDIRECT)
+ if (!icmp_is_err(icmph->type))
return NF_ACCEPT;
memset(&outer_daddr, 0, sizeof(outer_daddr));
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 128245efe84a..9889d52eda82 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -14,24 +14,15 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
-struct flow_offload_entry {
- struct flow_offload flow;
- struct nf_conn *ct;
- struct rcu_head rcu_head;
-};
-
static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);
static void
-flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
- struct nf_flow_route *route,
+flow_offload_fill_dir(struct flow_offload *flow,
enum flow_offload_tuple_dir dir)
{
struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
- struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
- struct dst_entry *other_dst = route->tuple[!dir].dst;
- struct dst_entry *dst = route->tuple[dir].dst;
+ struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;
ft->dir = dir;
@@ -39,12 +30,10 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
case NFPROTO_IPV4:
ft->src_v4 = ctt->src.u3.in;
ft->dst_v4 = ctt->dst.u3.in;
- ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
break;
case NFPROTO_IPV6:
ft->src_v6 = ctt->src.u3.in6;
ft->dst_v6 = ctt->dst.u3.in6;
- ft->mtu = ip6_dst_mtu_forward(dst);
break;
}
@@ -52,37 +41,24 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
ft->l4proto = ctt->dst.protonum;
ft->src_port = ctt->src.u.tcp.port;
ft->dst_port = ctt->dst.u.tcp.port;
-
- ft->iifidx = other_dst->dev->ifindex;
- ft->dst_cache = dst;
}
-struct flow_offload *
-flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
+struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
- struct flow_offload_entry *entry;
struct flow_offload *flow;
if (unlikely(nf_ct_is_dying(ct) ||
!atomic_inc_not_zero(&ct->ct_general.use)))
return NULL;
- entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry)
+ flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ if (!flow)
goto err_ct_refcnt;
- flow = &entry->flow;
+ flow->ct = ct;
- if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
- goto err_dst_cache_original;
-
- if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
- goto err_dst_cache_reply;
-
- entry->ct = ct;
-
- flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);
- flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);
+ flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
+ flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);
if (ct->status & IPS_SRC_NAT)
flow->flags |= FLOW_OFFLOAD_SNAT;
@@ -91,10 +67,6 @@ flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
return flow;
-err_dst_cache_reply:
- dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
-err_dst_cache_original:
- kfree(entry);
err_ct_refcnt:
nf_ct_put(ct);
@@ -102,6 +74,56 @@ err_ct_refcnt:
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);
+static int flow_offload_fill_route(struct flow_offload *flow,
+ const struct nf_flow_route *route,
+ enum flow_offload_tuple_dir dir)
+{
+ struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
+ struct dst_entry *other_dst = route->tuple[!dir].dst;
+ struct dst_entry *dst = route->tuple[dir].dst;
+
+ if (!dst_hold_safe(route->tuple[dir].dst))
+ return -1;
+
+ switch (flow_tuple->l3proto) {
+ case NFPROTO_IPV4:
+ flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
+ break;
+ case NFPROTO_IPV6:
+ flow_tuple->mtu = ip6_dst_mtu_forward(dst);
+ break;
+ }
+
+ flow_tuple->iifidx = other_dst->dev->ifindex;
+ flow_tuple->dst_cache = dst;
+
+ return 0;
+}
+
+int flow_offload_route_init(struct flow_offload *flow,
+ const struct nf_flow_route *route)
+{
+ int err;
+
+ err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
+ if (err < 0)
+ return err;
+
+ err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
+ if (err < 0)
+ goto err_route_reply;
+
+ flow->type = NF_FLOW_OFFLOAD_ROUTE;
+
+ return 0;
+
+err_route_reply:
+ dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(flow_offload_route_init);
+
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
tcp->state = TCP_CONNTRACK_ESTABLISHED;
@@ -150,17 +172,25 @@ static void flow_offload_fixup_ct(struct nf_conn *ct)
flow_offload_fixup_ct_timeout(ct);
}
-void flow_offload_free(struct flow_offload *flow)
+static void flow_offload_route_release(struct flow_offload *flow)
{
- struct flow_offload_entry *e;
-
dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
- e = container_of(flow, struct flow_offload_entry, flow);
+}
+
+void flow_offload_free(struct flow_offload *flow)
+{
+ switch (flow->type) {
+ case NF_FLOW_OFFLOAD_ROUTE:
+ flow_offload_route_release(flow);
+ break;
+ default:
+ break;
+ }
if (flow->flags & FLOW_OFFLOAD_DYING)
- nf_ct_delete(e->ct, 0, 0);
- nf_ct_put(e->ct);
- kfree_rcu(e, rcu_head);
+ nf_ct_delete(flow->ct, 0, 0);
+ nf_ct_put(flow->ct);
+ kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);
@@ -220,6 +250,9 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
return err;
}
+ if (flow_table->flags & NF_FLOWTABLE_HW_OFFLOAD)
+ nf_flow_offload_add(flow_table, flow);
+
return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);
@@ -232,8 +265,6 @@ static inline bool nf_flow_has_expired(const struct flow_offload *flow)
static void flow_offload_del(struct nf_flowtable *flow_table,
struct flow_offload *flow)
{
- struct flow_offload_entry *e;
-
rhashtable_remove_fast(&flow_table->rhashtable,
&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
nf_flow_offload_rhash_params);
@@ -241,25 +272,21 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
nf_flow_offload_rhash_params);
- e = container_of(flow, struct flow_offload_entry, flow);
- clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
+ clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
if (nf_flow_has_expired(flow))
- flow_offload_fixup_ct(e->ct);
+ flow_offload_fixup_ct(flow->ct);
else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
- flow_offload_fixup_ct_timeout(e->ct);
+ flow_offload_fixup_ct_timeout(flow->ct);
flow_offload_free(flow);
}
void flow_offload_teardown(struct flow_offload *flow)
{
- struct flow_offload_entry *e;
-
flow->flags |= FLOW_OFFLOAD_TEARDOWN;
- e = container_of(flow, struct flow_offload_entry, flow);
- flow_offload_fixup_ct_state(e->ct);
+ flow_offload_fixup_ct_state(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);
@@ -269,7 +296,6 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
{
struct flow_offload_tuple_rhash *tuplehash;
struct flow_offload *flow;
- struct flow_offload_entry *e;
int dir;
tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
@@ -282,8 +308,7 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
return NULL;
- e = container_of(flow, struct flow_offload_entry, flow);
- if (unlikely(nf_ct_is_dying(e->ct)))
+ if (unlikely(nf_ct_is_dying(flow->ct)))
return NULL;
return tuplehash;
@@ -327,12 +352,21 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
struct nf_flowtable *flow_table = data;
- struct flow_offload_entry *e;
- e = container_of(flow, struct flow_offload_entry, flow);
- if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) ||
- (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)))
- flow_offload_del(flow_table, flow);
+ if (flow->flags & FLOW_OFFLOAD_HW)
+ nf_flow_offload_stats(flow_table, flow);
+
+ if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) ||
+ (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))) {
+ if (flow->flags & FLOW_OFFLOAD_HW) {
+ if (!(flow->flags & FLOW_OFFLOAD_HW_DYING))
+ nf_flow_offload_del(flow_table, flow);
+ else if (flow->flags & FLOW_OFFLOAD_HW_DEAD)
+ flow_offload_del(flow_table, flow);
+ } else {
+ flow_offload_del(flow_table, flow);
+ }
+ }
}
static void nf_flow_offload_work_gc(struct work_struct *work)
@@ -465,6 +499,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
int err;
INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
+ flow_block_init(&flowtable->flow_block);
err = rhashtable_init(&flowtable->rhashtable,
&nf_flow_offload_rhash_params);
@@ -485,15 +520,13 @@ EXPORT_SYMBOL_GPL(nf_flow_table_init);
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
struct net_device *dev = data;
- struct flow_offload_entry *e;
-
- e = container_of(flow, struct flow_offload_entry, flow);
if (!dev) {
flow_offload_teardown(flow);
return;
}
- if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
+
+ if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
(flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
flow->tuplehash[1].tuple.iifidx == dev->ifindex))
flow_offload_dead(flow);
@@ -502,6 +535,7 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
struct net_device *dev)
{
+ nf_flow_table_offload_flush(flowtable);
nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
flush_delayed_work(&flowtable->gc_work);
}
@@ -529,5 +563,18 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
+static int __init nf_flow_table_module_init(void)
+{
+ return nf_flow_table_offload_init();
+}
+
+static void __exit nf_flow_table_module_exit(void)
+{
+ nf_flow_table_offload_exit();
+}
+
+module_init(nf_flow_table_module_init);
+module_exit(nf_flow_table_module_exit);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
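
With struct flow_offload_entry folded into struct flow_offload and the dst handling split out of flow_offload_alloc(), creating a flow becomes a three-step affair for the caller (nft_flow_offload being the in-tree user); roughly:

flow = flow_offload_alloc(ct);
if (!flow)
        goto err;

/* route_init releases its own dst references on failure */
if (flow_offload_route_init(flow, &route) < 0)
        goto err_free;

if (flow_offload_add(flowtable, flow) < 0)
        goto err_free;
...
err_free:
        flow_offload_free(flow);

flow_offload_add() now also kicks off hardware programming when the table carries NF_FLOWTABLE_HW_OFFLOAD, and the gc step walks offloaded flows through the FLOW_OFFLOAD_HW_DYING/HW_DEAD states so the software entry is freed only after the hardware entry is confirmed gone.
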
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
index 593357aedb36..88bedf1ff1ae 100644
--- a/net/netfilter/nf_flow_table_inet.c
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -21,9 +21,34 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
return NF_ACCEPT;
}
+static int nf_flow_rule_route_inet(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ const struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
+ int err;
+
+ switch (flow_tuple->l3proto) {
+ case NFPROTO_IPV4:
+ err = nf_flow_rule_route_ipv4(net, flow, dir, flow_rule);
+ break;
+ case NFPROTO_IPV6:
+ err = nf_flow_rule_route_ipv6(net, flow, dir, flow_rule);
+ break;
+ default:
+ err = -1;
+ break;
+ }
+
+ return err;
+}
+
static struct nf_flowtable_type flowtable_inet = {
.family = NFPROTO_INET,
.init = nf_flow_table_init,
+ .setup = nf_flow_table_offload_setup,
+ .action = nf_flow_rule_route_inet,
.free = nf_flow_table_free,
.hook = nf_flow_offload_inet_hook,
.owner = THIS_MODULE,
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
new file mode 100644
index 000000000000..c54c9a6cc981
--- /dev/null
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -0,0 +1,851 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
+#include <linux/tc_act/tc_csum.h>
+#include <net/flow_offload.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+
+static struct work_struct nf_flow_offload_work;
+static DEFINE_SPINLOCK(flow_offload_pending_list_lock);
+static LIST_HEAD(flow_offload_pending_list);
+
+struct flow_offload_work {
+ struct list_head list;
+ enum flow_cls_command cmd;
+ int priority;
+ struct nf_flowtable *flowtable;
+ struct flow_offload *flow;
+};
+
+struct nf_flow_key {
+ struct flow_dissector_key_control control;
+ struct flow_dissector_key_basic basic;
+ union {
+ struct flow_dissector_key_ipv4_addrs ipv4;
+ };
+ struct flow_dissector_key_tcp tcp;
+ struct flow_dissector_key_ports tp;
+} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+
+struct nf_flow_match {
+ struct flow_dissector dissector;
+ struct nf_flow_key key;
+ struct nf_flow_key mask;
+};
+
+struct nf_flow_rule {
+ struct nf_flow_match match;
+ struct flow_rule *rule;
+};
+
+#define NF_FLOW_DISSECTOR(__match, __type, __field) \
+ (__match)->dissector.offset[__type] = \
+ offsetof(struct nf_flow_key, __field)
+
+static int nf_flow_rule_match(struct nf_flow_match *match,
+ const struct flow_offload_tuple *tuple)
+{
+ struct nf_flow_key *mask = &match->mask;
+ struct nf_flow_key *key = &match->key;
+
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);
+
+ switch (tuple->l3proto) {
+ case AF_INET:
+ key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ key->basic.n_proto = htons(ETH_P_IP);
+ key->ipv4.src = tuple->src_v4.s_addr;
+ mask->ipv4.src = 0xffffffff;
+ key->ipv4.dst = tuple->dst_v4.s_addr;
+ mask->ipv4.dst = 0xffffffff;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ mask->basic.n_proto = 0xffff;
+
+ switch (tuple->l4proto) {
+ case IPPROTO_TCP:
+ key->tcp.flags = 0;
+ mask->tcp.flags = TCP_FLAG_RST | TCP_FLAG_FIN;
+ match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
+ break;
+ case IPPROTO_UDP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ key->basic.ip_proto = tuple->l4proto;
+ mask->basic.ip_proto = 0xff;
+
+ key->tp.src = tuple->src_port;
+ mask->tp.src = 0xffff;
+ key->tp.dst = tuple->dst_port;
+ mask->tp.dst = 0xffff;
+
+ match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS);
+ return 0;
+}
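
The dissector offsets and used_keys bits filled in above follow the same
layout tc flower hands to drivers, so existing driver parsers can consume the
match with the stock flow_rule helpers. A sketch of the consuming side (the
example_ function is hypothetical, not from the patch):

	static void example_parse_match(const struct flow_rule *rule)
	{
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			struct flow_match_ipv4_addrs m;

			flow_rule_match_ipv4_addrs(rule, &m);
			/* m.key->src/dst, m.mask->src/dst as set above */
		}
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
			struct flow_match_ports m;

			flow_rule_match_ports(rule, &m);
			/* m.key->src/dst carry the tuple ports */
		}
	}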
+
+static void flow_offload_mangle(struct flow_action_entry *entry,
+ enum flow_action_mangle_base htype,
+ u32 offset, u8 *value, u8 *mask)
+{
+ entry->id = FLOW_ACTION_MANGLE;
+ entry->mangle.htype = htype;
+ entry->mangle.offset = offset;
+ memcpy(&entry->mangle.mask, mask, sizeof(u32));
+ memcpy(&entry->mangle.val, value, sizeof(u32));
+}
+
+static inline struct flow_action_entry *
+flow_action_entry_next(struct nf_flow_rule *flow_rule)
+{
+ int i = flow_rule->rule->action.num_entries++;
+
+ return &flow_rule->rule->action.entries[i];
+}
+
+static int flow_offload_eth_src(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple;
+ struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
+ struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
+ struct net_device *dev;
+ u32 mask, val;
+ u16 val16;
+
+ dev = dev_get_by_index(net, tuple->iifidx);
+ if (!dev)
+ return -ENOENT;
+
+ mask = ~0xffff0000;
+ memcpy(&val16, dev->dev_addr, 2);
+ val = val16 << 16;
+ flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
+ (u8 *)&val, (u8 *)&mask);
+
+ mask = ~0xffffffff;
+ memcpy(&val, dev->dev_addr + 2, 4);
+ flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
+ (u8 *)&val, (u8 *)&mask);
+ dev_put(dev);
+
+ return 0;
+}
+
+static int flow_offload_eth_dst(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple;
+ struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
+ struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
+ struct neighbour *n;
+ u32 mask, val;
+ u16 val16;
+
+ n = dst_neigh_lookup(tuple->dst_cache, &tuple->dst_v4);
+ if (!n)
+ return -ENOENT;
+
+ mask = ~0xffffffff;
+ memcpy(&val, n->ha, 4);
+ flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
+ (u8 *)&val, (u8 *)&mask);
+
+ mask = ~0x0000ffff;
+ memcpy(&val16, n->ha + 4, 2);
+ val = val16;
+ flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
+ (u8 *)&val, (u8 *)&mask);
+ neigh_release(n);
+
+ return 0;
+}
+
+static void flow_offload_ipv4_snat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+ u32 mask = ~htonl(0xffffffff);
+ __be32 addr;
+ u32 offset;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
+ offset = offsetof(struct iphdr, saddr);
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
+ offset = offsetof(struct iphdr, daddr);
+ break;
+ default:
+ return;
+ }
+
+ flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
+ (u8 *)&addr, (u8 *)&mask);
+}
+
+static void flow_offload_ipv4_dnat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+ u32 mask = ~htonl(0xffffffff);
+ __be32 addr;
+ u32 offset;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
+ offset = offsetof(struct iphdr, daddr);
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
+ offset = offsetof(struct iphdr, saddr);
+ break;
+ default:
+ return;
+ }
+
+ flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
+ (u8 *)&addr, (u8 *)&mask);
+}
+
+static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
+ unsigned int offset,
+ u8 *addr, u8 *mask)
+{
+ struct flow_action_entry *entry;
+ int i;
+
+ for (i = 0; i < sizeof(struct in6_addr); i += sizeof(u32)) {
+ entry = flow_action_entry_next(flow_rule);
+ flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
+ offset + i,
+ &addr[i], mask);
+ }
+}
+
+static void flow_offload_ipv6_snat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ u32 mask = ~htonl(0xffffffff);
+ const u8 *addr;
+ u32 offset;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr;
+ offset = offsetof(struct ipv6hdr, saddr);
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr;
+ offset = offsetof(struct ipv6hdr, daddr);
+ break;
+ default:
+ return;
+ }
+
+ flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask);
+}
+
+static void flow_offload_ipv6_dnat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ u32 mask = ~htonl(0xffffffff);
+ const u8 *addr;
+ u32 offset;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr;
+ offset = offsetof(struct ipv6hdr, daddr);
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr;
+ offset = offsetof(struct ipv6hdr, saddr);
+ break;
+ default:
+ return;
+ }
+
+ flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask);
+}
+
+static int flow_offload_l4proto(const struct flow_offload *flow)
+{
+ u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
+ u8 type = 0;
+
+ switch (protonum) {
+ case IPPROTO_TCP:
+ type = FLOW_ACT_MANGLE_HDR_TYPE_TCP;
+ break;
+ case IPPROTO_UDP:
+ type = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
+ break;
+ default:
+ break;
+ }
+
+ return type;
+}
+
+static void flow_offload_port_snat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+ u32 mask, port;
+ u32 offset;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
+ offset = 0; /* offsetof(struct tcphdr, source); */
+ port = htonl(port << 16);
+ mask = ~htonl(0xffff0000);
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
+ offset = 0; /* offsetof(struct tcphdr, dest); */
+ port = htonl(port);
+ mask = ~htonl(0xffff);
+ break;
+ default:
+ return;
+ }
+
+ flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
+ (u8 *)&port, (u8 *)&mask);
+}
+
+static void flow_offload_port_dnat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+ u32 mask, port;
+ u32 offset;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);
+ offset = 0; /* offsetof(struct tcphdr, dest); */
+ port = htonl(port);
+ mask = ~htonl(0xffff);
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port);
+ offset = 0; /* offsetof(struct tcphdr, source); */
+ port = htonl(port << 16);
+ mask = ~htonl(0xffff0000);
+ break;
+ default:
+ return;
+ }
+
+ flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
+ (u8 *)&port, (u8 *)&mask);
+}
+
+static void flow_offload_ipv4_checksum(struct net *net,
+ const struct flow_offload *flow,
+ struct nf_flow_rule *flow_rule)
+{
+ u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
+ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+
+ entry->id = FLOW_ACTION_CSUM;
+ entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR;
+
+ switch (protonum) {
+ case IPPROTO_TCP:
+ entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_TCP;
+ break;
+ case IPPROTO_UDP:
+ entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_UDP;
+ break;
+ }
+}
+
+static void flow_offload_redirect(const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+ struct rtable *rt;
+
+ rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
+ entry->id = FLOW_ACTION_REDIRECT;
+ entry->dev = rt->dst.dev;
+ dev_hold(rt->dst.dev);
+}
+
+int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
+ flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
+ return -1;
+
+ if (flow->flags & FLOW_OFFLOAD_SNAT) {
+ flow_offload_ipv4_snat(net, flow, dir, flow_rule);
+ flow_offload_port_snat(net, flow, dir, flow_rule);
+ }
+ if (flow->flags & FLOW_OFFLOAD_DNAT) {
+ flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
+ flow_offload_port_dnat(net, flow, dir, flow_rule);
+ }
+ if (flow->flags & FLOW_OFFLOAD_SNAT ||
+ flow->flags & FLOW_OFFLOAD_DNAT)
+ flow_offload_ipv4_checksum(net, flow, flow_rule);
+
+ flow_offload_redirect(flow, dir, flow_rule);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv4);
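
nf_flow_rule_route_ipv4() emits actions in a fixed order: Ethernet source and
destination rewrite, then any NAT mangles plus a checksum fixup, and finally
the redirect to the egress device. A driver walks that list with the standard
iterator; a sketch (the example_ function is hypothetical, not from the
patch):

	static int example_parse_actions(const struct flow_rule *rule)
	{
		const struct flow_action_entry *act;
		int i;

		flow_action_for_each(i, act, &rule->action) {
			switch (act->id) {
			case FLOW_ACTION_MANGLE:	/* eth/IP/port rewrite */
			case FLOW_ACTION_CSUM:		/* refresh checksums */
			case FLOW_ACTION_REDIRECT:	/* act->dev = egress */
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		return 0;
	}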
+
+int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
+ flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
+ return -1;
+
+ if (flow->flags & FLOW_OFFLOAD_SNAT) {
+ flow_offload_ipv6_snat(net, flow, dir, flow_rule);
+ flow_offload_port_snat(net, flow, dir, flow_rule);
+ }
+ if (flow->flags & FLOW_OFFLOAD_DNAT) {
+ flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
+ flow_offload_port_dnat(net, flow, dir, flow_rule);
+ }
+
+ flow_offload_redirect(flow, dir, flow_rule);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv6);
+
+#define NF_FLOW_RULE_ACTION_MAX 16
+
+static struct nf_flow_rule *
+nf_flow_offload_rule_alloc(struct net *net,
+ const struct flow_offload_work *offload,
+ enum flow_offload_tuple_dir dir)
+{
+ const struct nf_flowtable *flowtable = offload->flowtable;
+ const struct flow_offload *flow = offload->flow;
+ const struct flow_offload_tuple *tuple;
+ struct nf_flow_rule *flow_rule;
+ int err = -ENOMEM;
+
+ flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
+ if (!flow_rule)
+ goto err_flow;
+
+ flow_rule->rule = flow_rule_alloc(NF_FLOW_RULE_ACTION_MAX);
+ if (!flow_rule->rule)
+ goto err_flow_rule;
+
+ flow_rule->rule->match.dissector = &flow_rule->match.dissector;
+ flow_rule->rule->match.mask = &flow_rule->match.mask;
+ flow_rule->rule->match.key = &flow_rule->match.key;
+
+ tuple = &flow->tuplehash[dir].tuple;
+ err = nf_flow_rule_match(&flow_rule->match, tuple);
+ if (err < 0)
+ goto err_flow_match;
+
+ flow_rule->rule->action.num_entries = 0;
+ if (flowtable->type->action(net, flow, dir, flow_rule) < 0)
+ goto err_flow_match;
+
+ return flow_rule;
+
+err_flow_match:
+ kfree(flow_rule->rule);
+err_flow_rule:
+ kfree(flow_rule);
+err_flow:
+ return NULL;
+}
+
+static void __nf_flow_offload_destroy(struct nf_flow_rule *flow_rule)
+{
+ struct flow_action_entry *entry;
+ int i;
+
+ for (i = 0; i < flow_rule->rule->action.num_entries; i++) {
+ entry = &flow_rule->rule->action.entries[i];
+ if (entry->id != FLOW_ACTION_REDIRECT)
+ continue;
+
+ dev_put(entry->dev);
+ }
+ kfree(flow_rule->rule);
+ kfree(flow_rule);
+}
+
+static void nf_flow_offload_destroy(struct nf_flow_rule *flow_rule[])
+{
+ int i;
+
+ for (i = 0; i < FLOW_OFFLOAD_DIR_MAX; i++)
+ __nf_flow_offload_destroy(flow_rule[i]);
+}
+
+static int nf_flow_offload_alloc(const struct flow_offload_work *offload,
+ struct nf_flow_rule *flow_rule[])
+{
+ struct net *net = read_pnet(&offload->flowtable->net);
+
+ flow_rule[0] = nf_flow_offload_rule_alloc(net, offload,
+ FLOW_OFFLOAD_DIR_ORIGINAL);
+ if (!flow_rule[0])
+ return -ENOMEM;
+
+ flow_rule[1] = nf_flow_offload_rule_alloc(net, offload,
+ FLOW_OFFLOAD_DIR_REPLY);
+ if (!flow_rule[1]) {
+ __nf_flow_offload_destroy(flow_rule[0]);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
+ __be16 proto, int priority,
+ enum flow_cls_command cmd,
+ const struct flow_offload_tuple *tuple,
+ struct netlink_ext_ack *extack)
+{
+ cls_flow->common.protocol = proto;
+ cls_flow->common.prio = priority;
+ cls_flow->common.extack = extack;
+ cls_flow->command = cmd;
+ cls_flow->cookie = (unsigned long)tuple;
+}
+
+static int flow_offload_tuple_add(struct flow_offload_work *offload,
+ struct nf_flow_rule *flow_rule,
+ enum flow_offload_tuple_dir dir)
+{
+ struct nf_flowtable *flowtable = offload->flowtable;
+ struct flow_cls_offload cls_flow = {};
+ struct flow_block_cb *block_cb;
+ struct netlink_ext_ack extack;
+ __be16 proto = ETH_P_ALL;
+ int err, i = 0;
+
+ nf_flow_offload_init(&cls_flow, proto, offload->priority,
+ FLOW_CLS_REPLACE,
+ &offload->flow->tuplehash[dir].tuple, &extack);
+ cls_flow.rule = flow_rule->rule;
+
+ list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) {
+ err = block_cb->cb(TC_SETUP_FT, &cls_flow,
+ block_cb->cb_priv);
+ if (err < 0)
+ continue;
+
+ i++;
+ }
+
+ return i;
+}
+
+static void flow_offload_tuple_del(struct flow_offload_work *offload,
+ enum flow_offload_tuple_dir dir)
+{
+ struct nf_flowtable *flowtable = offload->flowtable;
+ struct flow_cls_offload cls_flow = {};
+ struct flow_block_cb *block_cb;
+ struct netlink_ext_ack extack;
+ __be16 proto = ETH_P_ALL;
+
+ nf_flow_offload_init(&cls_flow, proto, offload->priority,
+ FLOW_CLS_DESTROY,
+ &offload->flow->tuplehash[dir].tuple, &extack);
+
+ list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
+ block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv);
+
+ offload->flow->flags |= FLOW_OFFLOAD_HW_DEAD;
+}
+
+static int flow_offload_rule_add(struct flow_offload_work *offload,
+ struct nf_flow_rule *flow_rule[])
+{
+ int ok_count = 0;
+
+ ok_count += flow_offload_tuple_add(offload, flow_rule[0],
+ FLOW_OFFLOAD_DIR_ORIGINAL);
+ ok_count += flow_offload_tuple_add(offload, flow_rule[1],
+ FLOW_OFFLOAD_DIR_REPLY);
+ if (ok_count == 0)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int flow_offload_work_add(struct flow_offload_work *offload)
+{
+ struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX];
+ int err;
+
+ err = nf_flow_offload_alloc(offload, flow_rule);
+ if (err < 0)
+ return -ENOMEM;
+
+ err = flow_offload_rule_add(offload, flow_rule);
+
+ nf_flow_offload_destroy(flow_rule);
+
+ return err;
+}
+
+static void flow_offload_work_del(struct flow_offload_work *offload)
+{
+ flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
+ flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
+}
+
+static void flow_offload_tuple_stats(struct flow_offload_work *offload,
+ enum flow_offload_tuple_dir dir,
+ struct flow_stats *stats)
+{
+ struct nf_flowtable *flowtable = offload->flowtable;
+ struct flow_cls_offload cls_flow = {};
+ struct flow_block_cb *block_cb;
+ struct netlink_ext_ack extack;
+ __be16 proto = ETH_P_ALL;
+
+ nf_flow_offload_init(&cls_flow, proto, offload->priority,
+ FLOW_CLS_STATS,
+ &offload->flow->tuplehash[dir].tuple, &extack);
+
+ list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
+ block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv);
+ memcpy(stats, &cls_flow.stats, sizeof(*stats));
+}
+
+static void flow_offload_work_stats(struct flow_offload_work *offload)
+{
+ struct flow_stats stats[FLOW_OFFLOAD_DIR_MAX] = {};
+ u64 lastused;
+
+ flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_ORIGINAL, &stats[0]);
+ flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY, &stats[1]);
+
+ lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
+ offload->flow->timeout = max_t(u64, offload->flow->timeout,
+ lastused + NF_FLOW_TIMEOUT);
+}
+
+static void flow_offload_work_handler(struct work_struct *work)
+{
+ struct flow_offload_work *offload, *next;
+ LIST_HEAD(offload_pending_list);
+ int ret;
+
+ spin_lock_bh(&flow_offload_pending_list_lock);
+ list_replace_init(&flow_offload_pending_list, &offload_pending_list);
+ spin_unlock_bh(&flow_offload_pending_list_lock);
+
+ list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
+ switch (offload->cmd) {
+ case FLOW_CLS_REPLACE:
+ ret = flow_offload_work_add(offload);
+ if (ret < 0)
+ offload->flow->flags &= ~FLOW_OFFLOAD_HW;
+ break;
+ case FLOW_CLS_DESTROY:
+ flow_offload_work_del(offload);
+ break;
+ case FLOW_CLS_STATS:
+ flow_offload_work_stats(offload);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ list_del(&offload->list);
+ kfree(offload);
+ }
+}
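
The handler drains the pending list by splicing it out in one step, so the
spinlock taken from the packet path is never held while the hardware
callbacks run. The idiom, reduced to its core (sketch, not from the patch):

	static void example_drain(struct list_head *pending, spinlock_t *lock)
	{
		LIST_HEAD(batch);

		spin_lock_bh(lock);
		list_replace_init(pending, &batch);	/* steal all entries */
		spin_unlock_bh(lock);

		/* walk 'batch' lock-free; producers keep queueing */
	}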
+
+static void flow_offload_queue_work(struct flow_offload_work *offload)
+{
+ spin_lock_bh(&flow_offload_pending_list_lock);
+ list_add_tail(&offload->list, &flow_offload_pending_list);
+ spin_unlock_bh(&flow_offload_pending_list_lock);
+
+ schedule_work(&nf_flow_offload_work);
+}
+
+void nf_flow_offload_add(struct nf_flowtable *flowtable,
+ struct flow_offload *flow)
+{
+ struct flow_offload_work *offload;
+
+ offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
+ if (!offload)
+ return;
+
+ offload->cmd = FLOW_CLS_REPLACE;
+ offload->flow = flow;
+ offload->priority = flowtable->priority;
+ offload->flowtable = flowtable;
+ flow->flags |= FLOW_OFFLOAD_HW;
+
+ flow_offload_queue_work(offload);
+}
+
+void nf_flow_offload_del(struct nf_flowtable *flowtable,
+ struct flow_offload *flow)
+{
+ struct flow_offload_work *offload;
+
+ offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
+ if (!offload)
+ return;
+
+ offload->cmd = FLOW_CLS_DESTROY;
+ offload->flow = flow;
+ offload->flow->flags |= FLOW_OFFLOAD_HW_DYING;
+ offload->flowtable = flowtable;
+
+ flow_offload_queue_work(offload);
+}
+
+void nf_flow_offload_stats(struct nf_flowtable *flowtable,
+ struct flow_offload *flow)
+{
+ struct flow_offload_work *offload;
+ s64 delta;
+
+ delta = flow->timeout - jiffies;
+ if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) ||
+ flow->flags & FLOW_OFFLOAD_HW_DYING)
+ return;
+
+ offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
+ if (!offload)
+ return;
+
+ offload->cmd = FLOW_CLS_STATS;
+ offload->flow = flow;
+ offload->flowtable = flowtable;
+
+ flow_offload_queue_work(offload);
+}
+
+void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
+{
+ if (flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD)
+ flush_work(&nf_flow_offload_work);
+}
+
+static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
+ struct flow_block_offload *bo,
+ enum flow_block_command cmd)
+{
+ struct flow_block_cb *block_cb, *next;
+ int err = 0;
+
+ switch (cmd) {
+ case FLOW_BLOCK_BIND:
+ list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);
+ break;
+ case FLOW_BLOCK_UNBIND:
+ list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
+ list_del(&block_cb->list);
+ flow_block_cb_free(block_cb);
+ }
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd)
+{
+ struct netlink_ext_ack extack = {};
+ struct flow_block_offload bo = {};
+ int err;
+
+ if (!(flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD))
+ return 0;
+
+ if (!dev->netdev_ops->ndo_setup_tc)
+ return -EOPNOTSUPP;
+
+ bo.net = dev_net(dev);
+ bo.block = &flowtable->flow_block;
+ bo.command = cmd;
+ bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
+ bo.extack = &extack;
+ INIT_LIST_HEAD(&bo.cb_list);
+
+ err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
+ if (err < 0)
+ return err;
+
+ return nf_flow_table_block_setup(flowtable, &bo, cmd);
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_offload_setup);
+
+int nf_flow_table_offload_init(void)
+{
+ INIT_WORK(&nf_flow_offload_work, flow_offload_work_handler);
+
+ return 0;
+}
+
+void nf_flow_table_offload_exit(void)
+{
+ struct flow_offload_work *offload, *next;
+ LIST_HEAD(offload_pending_list);
+
+ cancel_work_sync(&nf_flow_offload_work);
+
+ spin_lock_bh(&flow_offload_pending_list_lock);
+ list_replace_init(&flow_offload_pending_list, &offload_pending_list);
+ spin_unlock_bh(&flow_offload_pending_list_lock);
+
+ list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
+ list_del(&offload->list);
+ kfree(offload);
+ }
+}
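
On the driver side, the binding arrives through ndo_setup_tc(TC_SETUP_BLOCK)
and the per-flow requests through the registered block callback with the new
TC_SETUP_FT type. A minimal sketch of such a callback (all foo_ names are
hypothetical, not from the patch):

	static int foo_setup_ft_cb(enum tc_setup_type type, void *type_data,
				   void *cb_priv)
	{
		struct flow_cls_offload *f = type_data;

		if (type != TC_SETUP_FT)
			return -EOPNOTSUPP;

		switch (f->command) {
		case FLOW_CLS_REPLACE:	/* program f->rule, key on f->cookie */
		case FLOW_CLS_DESTROY:	/* tear down entry for f->cookie */
		case FLOW_CLS_STATS:	/* fill f->stats for the core */
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}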
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 13f09412cc6a..ff04cdc87f76 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -361,6 +361,7 @@ static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type,
static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
{
+ struct nft_flow_rule *flow;
struct nft_trans *trans;
int err;
@@ -368,6 +369,16 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
if (trans == NULL)
return -ENOMEM;
+ if (ctx->chain->flags & NFT_CHAIN_HW_OFFLOAD) {
+ flow = nft_flow_rule_create(ctx->net, rule);
+ if (IS_ERR(flow)) {
+ nft_trans_destroy(trans);
+ return PTR_ERR(flow);
+ }
+
+ nft_trans_flow_rule(trans) = flow;
+ }
+
err = nf_tables_delrule_deactivate(ctx, rule);
if (err < 0) {
nft_trans_destroy(trans);
@@ -2177,6 +2188,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
if (nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
+ flags |= chain->flags & NFT_BASE_CHAIN;
return nf_tables_updchain(&ctx, genmask, policy, flags);
}
@@ -5398,9 +5410,6 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
struct nft_trans *trans;
int err;
- if (!obj->ops->update)
- return -EOPNOTSUPP;
-
trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ,
sizeof(struct nft_trans_obj));
if (!trans)
@@ -5837,6 +5846,7 @@ static const struct nla_policy nft_flowtable_policy[NFTA_FLOWTABLE_MAX + 1] = {
.len = NFT_NAME_MAXLEN - 1 },
[NFTA_FLOWTABLE_HOOK] = { .type = NLA_NESTED },
[NFTA_FLOWTABLE_HANDLE] = { .type = NLA_U64 },
+ [NFTA_FLOWTABLE_FLAGS] = { .type = NLA_U32 },
};
struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
@@ -5965,13 +5975,22 @@ nft_flowtable_type_get(struct net *net, u8 family)
return ERR_PTR(-ENOENT);
}
+static void nft_unregister_flowtable_hook(struct net *net,
+ struct nft_flowtable *flowtable,
+ struct nft_hook *hook)
+{
+ nf_unregister_net_hook(net, &hook->ops);
+ flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
+ FLOW_BLOCK_UNBIND);
+}
+
static void nft_unregister_flowtable_net_hooks(struct net *net,
struct nft_flowtable *flowtable)
{
struct nft_hook *hook;
list_for_each_entry(hook, &flowtable->hook_list, list)
- nf_unregister_net_hook(net, &hook->ops);
+ nft_unregister_flowtable_hook(net, flowtable, hook);
}
static int nft_register_flowtable_net_hooks(struct net *net,
@@ -5993,10 +6012,20 @@ static int nft_register_flowtable_net_hooks(struct net *net,
}
}
- err = nf_register_net_hook(net, &hook->ops);
+ err = flowtable->data.type->setup(&flowtable->data,
+ hook->ops.dev,
+ FLOW_BLOCK_BIND);
if (err < 0)
goto err_unregister_net_hooks;
+ err = nf_register_net_hook(net, &hook->ops);
+ if (err < 0) {
+ flowtable->data.type->setup(&flowtable->data,
+ hook->ops.dev,
+ FLOW_BLOCK_UNBIND);
+ goto err_unregister_net_hooks;
+ }
+
i++;
}
@@ -6007,7 +6036,7 @@ err_unregister_net_hooks:
if (i-- <= 0)
break;
- nf_unregister_net_hook(net, &hook->ops);
+ nft_unregister_flowtable_hook(net, flowtable, hook);
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
}
@@ -6082,6 +6111,14 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
goto err2;
}
+ if (nla[NFTA_FLOWTABLE_FLAGS]) {
+ flowtable->data.flags =
+ ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
+ if (flowtable->data.flags & ~NF_FLOWTABLE_HW_OFFLOAD)
+ goto err3;
+ }
+
+ write_pnet(&flowtable->data.net, net);
flowtable->data.type = type;
err = type->init(&flowtable->data);
if (err < 0)
@@ -6106,7 +6143,7 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
return 0;
err5:
list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
- nf_unregister_net_hook(net, &hook->ops);
+ nft_unregister_flowtable_hook(net, flowtable, hook);
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
}
@@ -6193,7 +6230,8 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) ||
nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
nla_put_be64(skb, NFTA_FLOWTABLE_HANDLE, cpu_to_be64(flowtable->handle),
- NFTA_FLOWTABLE_PAD))
+ NFTA_FLOWTABLE_PAD) ||
+ nla_put_be32(skb, NFTA_FLOWTABLE_FLAGS, htonl(flowtable->data.flags)))
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, NFTA_FLOWTABLE_HOOK);
@@ -6450,7 +6488,7 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev,
if (hook->ops.dev != dev)
continue;
- nf_unregister_net_hook(dev_net(dev), &hook->ops);
+ nft_unregister_flowtable_hook(dev_net(dev), flowtable, hook);
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
break;
@@ -6733,7 +6771,8 @@ static void nft_obj_commit_update(struct nft_trans *trans)
obj = nft_trans_obj(trans);
newobj = nft_trans_obj_newobj(trans);
- obj->ops->update(obj, newobj);
+ if (obj->ops->update)
+ obj->ops->update(obj, newobj);
kfree(newobj);
}
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 93e27a6570bc..68f17a6921d8 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -159,9 +159,9 @@ static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
const struct nft_base_chain *basechain,
const struct nft_rule *rule,
const struct nft_flow_rule *flow,
+ struct netlink_ext_ack *extack,
enum flow_cls_command command)
{
- struct netlink_ext_ack extack;
__be16 proto = ETH_P_ALL;
memset(cls_flow, 0, sizeof(*cls_flow));
@@ -170,7 +170,7 @@ static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
proto = flow->proto;
nft_flow_offload_common_init(&cls_flow->common, proto,
- basechain->ops.priority, &extack);
+ basechain->ops.priority, extack);
cls_flow->command = command;
cls_flow->cookie = (unsigned long) rule;
if (flow)
@@ -182,6 +182,7 @@ static int nft_flow_offload_rule(struct nft_chain *chain,
struct nft_flow_rule *flow,
enum flow_cls_command command)
{
+ struct netlink_ext_ack extack = {};
struct flow_cls_offload cls_flow;
struct nft_base_chain *basechain;
@@ -189,7 +190,8 @@ static int nft_flow_offload_rule(struct nft_chain *chain,
return -EOPNOTSUPP;
basechain = nft_base_chain(chain);
- nft_flow_cls_offload_setup(&cls_flow, basechain, rule, flow, command);
+ nft_flow_cls_offload_setup(&cls_flow, basechain, rule, flow, &extack,
+ command);
return nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow,
&basechain->flow_block.cb_list);
@@ -207,13 +209,15 @@ static int nft_flow_offload_unbind(struct flow_block_offload *bo,
{
struct flow_block_cb *block_cb, *next;
struct flow_cls_offload cls_flow;
+ struct netlink_ext_ack extack;
struct nft_chain *chain;
struct nft_rule *rule;
chain = &basechain->chain;
list_for_each_entry(rule, &chain->rules, list) {
+ memset(&extack, 0, sizeof(extack));
nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
- FLOW_CLS_DESTROY);
+ &extack, FLOW_CLS_DESTROY);
nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
}
@@ -385,6 +389,55 @@ static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
return nft_flow_block_chain(basechain, NULL, cmd);
}
+static void nft_flow_rule_offload_abort(struct net *net,
+ struct nft_trans *trans)
+{
+ int err = 0;
+
+ list_for_each_entry_continue_reverse(trans, &net->nft.commit_list, list) {
+ if (trans->ctx.family != NFPROTO_NETDEV)
+ continue;
+
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWCHAIN:
+ if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
+ nft_trans_chain_update(trans))
+ continue;
+
+ err = nft_flow_offload_chain(trans->ctx.chain, NULL,
+ FLOW_BLOCK_UNBIND);
+ break;
+ case NFT_MSG_DELCHAIN:
+ if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ continue;
+
+ err = nft_flow_offload_chain(trans->ctx.chain, NULL,
+ FLOW_BLOCK_BIND);
+ break;
+ case NFT_MSG_NEWRULE:
+ if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ continue;
+
+ err = nft_flow_offload_rule(trans->ctx.chain,
+ nft_trans_rule(trans),
+ NULL, FLOW_CLS_DESTROY);
+ break;
+ case NFT_MSG_DELRULE:
+ if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ continue;
+
+ err = nft_flow_offload_rule(trans->ctx.chain,
+ nft_trans_rule(trans),
+ nft_trans_flow_rule(trans),
+ FLOW_CLS_REPLACE);
+ break;
+ }
+
+ if (WARN_ON_ONCE(err))
+ break;
+ }
+}
+
int nft_flow_rule_offload_commit(struct net *net)
{
struct nft_trans *trans;
@@ -397,7 +450,8 @@ int nft_flow_rule_offload_commit(struct net *net)
switch (trans->msg_type) {
case NFT_MSG_NEWCHAIN:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
+ nft_trans_chain_update(trans))
continue;
policy = nft_trans_chain_policy(trans);
@@ -417,14 +471,14 @@ int nft_flow_rule_offload_commit(struct net *net)
continue;
if (trans->ctx.flags & NLM_F_REPLACE ||
- !(trans->ctx.flags & NLM_F_APPEND))
- return -EOPNOTSUPP;
-
+ !(trans->ctx.flags & NLM_F_APPEND)) {
+ err = -EOPNOTSUPP;
+ break;
+ }
err = nft_flow_offload_rule(trans->ctx.chain,
nft_trans_rule(trans),
nft_trans_flow_rule(trans),
FLOW_CLS_REPLACE);
- nft_flow_rule_destroy(nft_trans_flow_rule(trans));
break;
case NFT_MSG_DELRULE:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
@@ -432,13 +486,31 @@ int nft_flow_rule_offload_commit(struct net *net)
err = nft_flow_offload_rule(trans->ctx.chain,
nft_trans_rule(trans),
- nft_trans_flow_rule(trans),
- FLOW_CLS_DESTROY);
+ NULL, FLOW_CLS_DESTROY);
break;
}
- if (err)
- return err;
+ if (err) {
+ nft_flow_rule_offload_abort(net, trans);
+ break;
+ }
+ }
+
+ list_for_each_entry(trans, &net->nft.commit_list, list) {
+ if (trans->ctx.family != NFPROTO_NETDEV)
+ continue;
+
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWRULE:
+ case NFT_MSG_DELRULE:
+ if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ continue;
+
+ nft_flow_rule_destroy(nft_trans_flow_rule(trans));
+ break;
+ default:
+ break;
+ }
}
return err;
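
The abort path above leans on a property of
list_for_each_entry_continue_reverse(): iteration starts at the element
before the one passed in, i.e. it revisits exactly the transactions that were
already pushed to hardware when 'trans' failed, and issues the inverse
operation for each (UNBIND for a bind, REPLACE for a destroy, and so on).
The shape, in isolation (sketch, not from the patch):

	/* 'trans' is the transaction that failed */
	list_for_each_entry_continue_reverse(trans, &net->nft.commit_list, list)
		undo(trans);	/* inverse of what the commit pass applied */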
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index 974300178fa9..02afa752dd2e 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -134,12 +134,13 @@ static int nft_bitwise_offload(struct nft_offload_ctx *ctx,
const struct nft_expr *expr)
{
const struct nft_bitwise *priv = nft_expr_priv(expr);
+ struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
if (memcmp(&priv->xor, &zero, sizeof(priv->xor)) ||
- priv->sreg != priv->dreg)
+ priv->sreg != priv->dreg || priv->len != reg->len)
return -EOPNOTSUPP;
- memcpy(&ctx->regs[priv->dreg].mask, &priv->mask, sizeof(priv->mask));
+ memcpy(&reg->mask, &priv->mask, sizeof(priv->mask));
return 0;
}
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index bd173b1824c6..b8092069f868 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
+#include <linux/if_arp.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables_offload.h>
@@ -116,7 +117,7 @@ static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
u8 *mask = (u8 *)&flow->match.mask;
u8 *key = (u8 *)&flow->match.key;
- if (priv->op != NFT_CMP_EQ)
+ if (priv->op != NFT_CMP_EQ || reg->len != priv->len)
return -EOPNOTSUPP;
memcpy(key + reg->offset, &priv->data, priv->len);
@@ -125,6 +126,11 @@ static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
flow->match.dissector.used_keys |= BIT(reg->key);
flow->match.dissector.offset[reg->key] = reg->base_offset;
+ if (reg->key == FLOW_DISSECTOR_KEY_META &&
+ reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&
+ nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
+ return -EOPNOTSUPP;
+
nft_offload_update_dependency(ctx, &priv->data, priv->len);
return 0;
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index f29bbc74c4bf..dd82ff2ee19f 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -115,10 +115,13 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
if (nft_flow_route(pkt, ct, &route, dir) < 0)
goto err_flow_route;
- flow = flow_offload_alloc(ct, &route);
+ flow = flow_offload_alloc(ct);
if (!flow)
goto err_flow_alloc;
+ if (flow_offload_route_init(flow, &route) < 0)
+ goto err_flow_add;
+
if (tcph) {
ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 317e3a9e8c5b..9740b554fdb3 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -33,19 +33,19 @@
static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state);
-static u8 nft_meta_weekday(unsigned long secs)
+static u8 nft_meta_weekday(time64_t secs)
{
unsigned int dse;
u8 wday;
secs -= NFT_META_SECS_PER_MINUTE * sys_tz.tz_minuteswest;
- dse = secs / NFT_META_SECS_PER_DAY;
+ dse = div_u64(secs, NFT_META_SECS_PER_DAY);
wday = (4 + dse) % NFT_META_DAYS_PER_WEEK;
return wday;
}
-static u32 nft_meta_hour(unsigned long secs)
+static u32 nft_meta_hour(time64_t secs)
{
struct tm tm;
@@ -250,10 +250,10 @@ void nft_meta_get_eval(const struct nft_expr *expr,
nft_reg_store64(dest, ktime_get_real_ns());
break;
case NFT_META_TIME_DAY:
- nft_reg_store8(dest, nft_meta_weekday(get_seconds()));
+ nft_reg_store8(dest, nft_meta_weekday(ktime_get_real_seconds()));
break;
case NFT_META_TIME_HOUR:
- *dest = nft_meta_hour(get_seconds());
+ *dest = nft_meta_hour(ktime_get_real_seconds());
break;
default:
WARN_ON(1);
@@ -547,6 +547,14 @@ static int nft_meta_get_offload(struct nft_offload_ctx *ctx,
sizeof(__u8), reg);
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
break;
+ case NFT_META_IIF:
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_META, meta,
+ ingress_ifindex, sizeof(__u32), reg);
+ break;
+ case NFT_META_IIFTYPE:
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_META, meta,
+ ingress_iftype, sizeof(__u16), reg);
+ break;
default:
return -EOPNOTSUPP;
}
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 5cb2d8908d2a..1993af3a2979 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -23,50 +23,58 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
+static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
+ struct vlan_ethhdr *veth)
+{
+ if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
+ return false;
+
+ veth->h_vlan_proto = skb->vlan_proto;
+ veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
+ veth->h_vlan_encapsulated_proto = skb->protocol;
+
+ return true;
+}
+
/* add vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
int mac_off = skb_mac_header(skb) - skb->data;
- u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
+ u8 *vlanh, *dst_u8 = (u8 *) d;
struct vlan_ethhdr veth;
+ u8 vlan_hlen = 0;
+
+ if ((skb->protocol == htons(ETH_P_8021AD) ||
+ skb->protocol == htons(ETH_P_8021Q)) &&
+ offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
+ vlan_hlen += VLAN_HLEN;
vlanh = (u8 *) &veth;
- if (offset < ETH_HLEN) {
- u8 ethlen = min_t(u8, len, ETH_HLEN - offset);
+ if (offset < VLAN_ETH_HLEN + vlan_hlen) {
+ u8 ethlen = len;
- if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
+ if (vlan_hlen &&
+ skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
+ return false;
+ else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
return false;
- veth.h_vlan_proto = skb->vlan_proto;
+ if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
+ ethlen -= offset + len - (VLAN_ETH_HLEN + vlan_hlen);
- memcpy(dst_u8, vlanh + offset, ethlen);
+ memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
len -= ethlen;
if (len == 0)
return true;
dst_u8 += ethlen;
- offset = ETH_HLEN;
- } else if (offset >= VLAN_ETH_HLEN) {
- offset -= VLAN_HLEN;
- goto skip;
+ offset = ETH_HLEN + vlan_hlen;
+ } else {
+ offset -= VLAN_HLEN + vlan_hlen;
}
- veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
- veth.h_vlan_encapsulated_proto = skb->protocol;
-
- vlanh += offset;
-
- vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
- memcpy(dst_u8, vlanh, vlan_len);
-
- len -= vlan_len;
- if (!len)
- return true;
-
- dst_u8 += vlan_len;
- skip:
return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
@@ -174,6 +182,44 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
dst, ETH_ALEN, reg);
break;
+ case offsetof(struct ethhdr, h_proto):
+ if (priv->len != sizeof(__be16))
+ return -EOPNOTSUPP;
+
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
+ n_proto, sizeof(__be16), reg);
+ nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
+ break;
+ case offsetof(struct vlan_ethhdr, h_vlan_TCI):
+ if (priv->len != sizeof(__be16))
+ return -EOPNOTSUPP;
+
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
+ vlan_tci, sizeof(__be16), reg);
+ break;
+ case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
+ if (priv->len != sizeof(__be16))
+ return -EOPNOTSUPP;
+
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
+ vlan_tpid, sizeof(__be16), reg);
+ nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
+ break;
+ case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
+ if (priv->len != sizeof(__be16))
+ return -EOPNOTSUPP;
+
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
+ vlan_tci, sizeof(__be16), reg);
+ break;
+ case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
+ sizeof(struct vlan_hdr):
+ if (priv->len != sizeof(__be16))
+ return -EOPNOTSUPP;
+
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
+ vlan_tpid, sizeof(__be16), reg);
+ break;
default:
return -EOPNOTSUPP;
}
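
The case labels above map raw link-layer byte offsets onto dissector keys.
For a double-tagged frame, the layout they cover is roughly (sketch, not
from the patch):

	/*  0..5   h_dest       -> FLOW_DISSECTOR_KEY_ETH_ADDRS dst
	 *  6..11  h_source     -> FLOW_DISSECTOR_KEY_ETH_ADDRS src
	 * 12..13  h_proto/TPID -> FLOW_DISSECTOR_KEY_BASIC     n_proto
	 * 14..15  outer TCI    -> FLOW_DISSECTOR_KEY_VLAN      vlan_tci
	 * 16..17  next proto   -> FLOW_DISSECTOR_KEY_VLAN      vlan_tpid
	 * 18..19  inner TCI    -> FLOW_DISSECTOR_KEY_CVLAN     vlan_tci
	 * 20..21  next proto   -> FLOW_DISSECTOR_KEY_CVLAN     vlan_tpid
	 */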
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
index be7798a50546..713fb38541df 100644
--- a/net/netfilter/xt_HMARK.c
+++ b/net/netfilter/xt_HMARK.c
@@ -239,11 +239,7 @@ static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff)
return 0;
/* Error message? */
- if (icmph->type != ICMP_DEST_UNREACH &&
- icmph->type != ICMP_SOURCE_QUENCH &&
- icmph->type != ICMP_TIME_EXCEEDED &&
- icmph->type != ICMP_PARAMETERPROB &&
- icmph->type != ICMP_REDIRECT)
+ if (!icmp_is_err(icmph->type))
return 0;
*nhoff += iphsz + sizeof(_ih);
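
The open-coded ICMP type list is replaced by the icmp_is_err() helper
introduced in this same cycle; it presumably reads along these lines (sketch
of the helper, see include/linux/icmp.h):

	static inline bool icmp_is_err(int type)
	{
		switch (type) {
		case ICMP_DEST_UNREACH:
		case ICMP_SOURCE_QUENCH:
		case ICMP_REDIRECT:
		case ICMP_TIME_EXCEEDED:
		case ICMP_PARAMETERPROB:
			return true;
		}
		return false;
	}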
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 8dbb4d48f2ed..67cb98489415 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -77,12 +77,12 @@ static inline bool is_leap(unsigned int y)
* This is done in three separate functions so that the most expensive
* calculations are done last, in case a "simple match" can be found earlier.
*/
-static inline unsigned int localtime_1(struct xtm *r, time_t time)
+static inline unsigned int localtime_1(struct xtm *r, time64_t time)
{
unsigned int v, w;
/* Each day has 86400s, so finding the hour/minute is actually easy. */
- v = time % SECONDS_PER_DAY;
+ div_u64_rem(time, SECONDS_PER_DAY, &v);
r->second = v % 60;
w = v / 60;
r->minute = w % 60;
@@ -90,13 +90,13 @@ static inline unsigned int localtime_1(struct xtm *r, time_t time)
return v;
}
-static inline void localtime_2(struct xtm *r, time_t time)
+static inline void localtime_2(struct xtm *r, time64_t time)
{
/*
* Here comes the rest (weekday, monthday). First, divide the SSTE
* by seconds-per-day to get the number of _days_ since the epoch.
*/
- r->dse = time / 86400;
+ r->dse = div_u64(time, SECONDS_PER_DAY);
/*
* 1970-01-01 (w=0) was a Thursday (4).
@@ -105,7 +105,7 @@ static inline void localtime_2(struct xtm *r, time_t time)
r->weekday = (4 + r->dse - 1) % 7 + 1;
}
-static void localtime_3(struct xtm *r, time_t time)
+static void localtime_3(struct xtm *r, time64_t time)
{
unsigned int year, i, w = r->dse;
@@ -160,7 +160,7 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
const struct xt_time_info *info = par->matchinfo;
unsigned int packet_time;
struct xtm current_time;
- s64 stamp;
+ time64_t stamp;
/*
* We need real time here, but we can neither use skb->tstamp
@@ -173,14 +173,14 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
* 1. match before 13:00
* 2. match after 13:00
*
- * If you match against processing time (get_seconds) it
+ * If you match against processing time (ktime_get_real_seconds) it
* may happen that the same packet matches both rules if
* it arrived at the right moment before 13:00, so it would be
* better to check skb->tstamp and set it via __net_timestamp()
* if needed. This however breaks outgoing packets tx timestamp,
* and causes them to get delayed forever by fq packet scheduler.
*/
- stamp = get_seconds();
+ stamp = ktime_get_real_seconds();
if (info->flags & XT_TIME_LOCAL_TZ)
/* Adjust for local timezone */
@@ -193,6 +193,9 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
* - 'now' is in the weekday mask
* - 'now' is in the daytime range time_start..time_end
* (and by default, libxt_time will set these so as to match)
+ *
+ * note: info->date_start/stop are unsigned 32-bit values that
+ * can hold values beyond y2038, but not after y2106.
*/
if (stamp < info->date_start || stamp > info->date_stop)
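
The time64_t conversions above matter on 32-bit targets, where a plain '/'
or '%' on a 64-bit value would pull in slow library division; div_u64() and
div_u64_rem() take a 32-bit divisor and stay cheap. Usage sketch (not from
the patch):

	#include <linux/math64.h>

	static void example_split(time64_t now)
	{
		u32 secs_today;
		u64 days = div_u64_rem(now, 86400, &secs_today);

		/* 'days' since the epoch, 'secs_today' into the day */
	}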
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index fd9ad534dd9b..eee0dddb7749 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1091,7 +1091,6 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
local = nfc_llcp_find_local(dev);
if (!local) {
- nfc_put_device(dev);
rc = -ENODEV;
goto exit;
}
@@ -1151,7 +1150,6 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
local = nfc_llcp_find_local(dev);
if (!local) {
- nfc_put_device(dev);
rc = -ENODEV;
goto exit;
}
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 1c77f520f474..12936c151cc0 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -200,7 +200,7 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
if (err)
return err;
- flow_key->mpls.top_lse = lse;
+ flow_key->mpls.lse[0] = lse;
return 0;
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 2088619c03f0..93d4991ddc1f 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -350,7 +350,8 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
+ nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
- + nla_total_size(sizeof(unsigned int)); /* OVS_PACKET_ATTR_LEN */
+ + nla_total_size(sizeof(unsigned int)) /* OVS_PACKET_ATTR_LEN */
+ + nla_total_size(sizeof(u64)); /* OVS_PACKET_ATTR_HASH */
/* OVS_PACKET_ATTR_USERDATA */
if (upcall_info->userdata)
@@ -393,6 +394,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
size_t len;
unsigned int hlen;
int err, dp_ifindex;
+ u64 hash;
dp_ifindex = get_dpifindex(dp);
if (!dp_ifindex)
@@ -485,23 +487,30 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
}
/* Add OVS_PACKET_ATTR_MRU */
- if (upcall_info->mru) {
- if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
- upcall_info->mru)) {
- err = -ENOBUFS;
- goto out;
- }
- pad_packet(dp, user_skb);
+ if (upcall_info->mru &&
+ nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU, upcall_info->mru)) {
+ err = -ENOBUFS;
+ goto out;
}
/* Add OVS_PACKET_ATTR_LEN when packet is truncated */
- if (cutlen > 0) {
- if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN,
- skb->len)) {
- err = -ENOBUFS;
- goto out;
- }
- pad_packet(dp, user_skb);
+ if (cutlen > 0 &&
+ nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN, skb->len)) {
+ err = -ENOBUFS;
+ goto out;
+ }
+
+ /* Add OVS_PACKET_ATTR_HASH */
+ hash = skb_get_hash_raw(skb);
+ if (skb->sw_hash)
+ hash |= OVS_PACKET_HASH_SW_BIT;
+
+ if (skb->l4_hash)
+ hash |= OVS_PACKET_HASH_L4_BIT;
+
+ if (nla_put(user_skb, OVS_PACKET_ATTR_HASH, sizeof(u64), &hash)) {
+ err = -ENOBUFS;
+ goto out;
}
/* Only reserve room for attribute header, packet data is added
@@ -543,6 +552,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
struct datapath *dp;
struct vport *input_vport;
u16 mru = 0;
+ u64 hash;
int len;
int err;
bool log = !a[OVS_PACKET_ATTR_PROBE];
@@ -568,6 +578,14 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
}
OVS_CB(packet)->mru = mru;
+ if (a[OVS_PACKET_ATTR_HASH]) {
+ hash = nla_get_u64(a[OVS_PACKET_ATTR_HASH]);
+
+ __skb_set_hash(packet, hash & 0xFFFFFFFFULL,
+ !!(hash & OVS_PACKET_HASH_SW_BIT),
+ !!(hash & OVS_PACKET_HASH_L4_BIT));
+ }
+
/* Build an sw_flow for sending this packet. */
flow = ovs_flow_alloc();
err = PTR_ERR(flow);
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 81e85dde8217..e239a46c2f94 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -139,6 +139,18 @@ struct ovs_net {
bool xt_label;
};
+/**
+ * enum ovs_pkt_hash_types - hash info to include with a packet
+ * to send to userspace.
+ * @OVS_PACKET_HASH_SW_BIT: indicates hash was computed in software stack.
+ * @OVS_PACKET_HASH_L4_BIT: indicates hash is a canonical 4-tuple hash
+ * over transport ports.
+ */
+enum ovs_pkt_hash_types {
+ OVS_PACKET_HASH_SW_BIT = (1ULL << 32),
+ OVS_PACKET_HASH_L4_BIT = (1ULL << 33),
+};
+
extern unsigned int ovs_net_id;
void ovs_lock(void);
void ovs_unlock(void);
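
The hash attribute packs everything into one u64: the low 32 bits carry the
skb hash, and bits 32 and 33 record whether it was software-computed and
whether it is an L4 hash. Encode/decode, mirroring the datapath.c hunks above
(sketch with hypothetical example_ helpers):

	static u64 example_encode_hash(const struct sk_buff *skb)
	{
		u64 hash = skb_get_hash_raw(skb);

		if (skb->sw_hash)
			hash |= OVS_PACKET_HASH_SW_BIT;
		if (skb->l4_hash)
			hash |= OVS_PACKET_HASH_L4_BIT;
		return hash;
	}

	static void example_decode_hash(struct sk_buff *skb, u64 hash)
	{
		__skb_set_hash(skb, hash & 0xFFFFFFFFULL,
			       !!(hash & OVS_PACKET_HASH_SW_BIT),
			       !!(hash & OVS_PACKET_HASH_L4_BIT));
	}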
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 38147e6a20f5..9d375e74b607 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -637,27 +637,35 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
memset(&key->ipv4, 0, sizeof(key->ipv4));
}
} else if (eth_p_mpls(key->eth.type)) {
- size_t stack_len = MPLS_HLEN;
+ u8 label_count = 1;
+ memset(&key->mpls, 0, sizeof(key->mpls));
skb_set_inner_network_header(skb, skb->mac_len);
while (1) {
__be32 lse;
- error = check_header(skb, skb->mac_len + stack_len);
+ error = check_header(skb, skb->mac_len +
+ label_count * MPLS_HLEN);
if (unlikely(error))
return 0;
memcpy(&lse, skb_inner_network_header(skb), MPLS_HLEN);
- if (stack_len == MPLS_HLEN)
- memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);
+ if (label_count <= MPLS_LABEL_DEPTH)
+ memcpy(&key->mpls.lse[label_count - 1], &lse,
+ MPLS_HLEN);
- skb_set_inner_network_header(skb, skb->mac_len + stack_len);
+ skb_set_inner_network_header(skb, skb->mac_len +
+ label_count * MPLS_HLEN);
if (lse & htonl(MPLS_LS_S_MASK))
break;
- stack_len += MPLS_HLEN;
+ label_count++;
}
+ if (label_count > MPLS_LABEL_DEPTH)
+ label_count = MPLS_LABEL_DEPTH;
+
+ key->mpls.num_labels_mask = GENMASK(label_count - 1, 0);
} else if (key->eth.type == htons(ETH_P_IPV6)) {
int nh_len; /* IPv6 Header + Extensions */
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 8080518ca5f2..fd8ed766bdd1 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -30,6 +30,7 @@ enum sw_flow_mac_proto {
MAC_PROTO_ETHERNET,
};
#define SW_FLOW_KEY_INVALID 0x80
+#define MPLS_LABEL_DEPTH 3
/* Store options at the end of the array if they are less than the
* maximum size. This allows us to get the benefits of variable length
@@ -85,9 +86,6 @@ struct sw_flow_key {
*/
union {
struct {
- __be32 top_lse; /* top label stack entry */
- } mpls;
- struct {
u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */
u8 tos; /* IP ToS. */
u8 ttl; /* IP TTL/hop limit. */
@@ -135,6 +133,11 @@ struct sw_flow_key {
} nd;
};
} ipv6;
+ struct {
+ u32 num_labels_mask; /* labels present bitmap of effective length MPLS_LABEL_DEPTH */
+ __be32 lse[MPLS_LABEL_DEPTH]; /* label stack entry */
+ } mpls;
+
struct ovs_key_nsh nsh; /* network service header */
};
struct {
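
num_labels_mask is a one-bit-per-label bitmap rather than a plain count, so
masked flow lookups can wildcard the label depth like any other key field.
The mapping (sketch, not from the patch):

	/* label_count -> num_labels_mask
	 *	1 -> GENMASK(0, 0) == 0x1
	 *	2 -> GENMASK(1, 0) == 0x3
	 *	3 -> GENMASK(2, 0) == 0x7
	 * and hweight_long(num_labels_mask) recovers the count, as the
	 * netlink put path below does.
	 */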
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index d7559c64795d..65c2e3458ff5 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -424,7 +424,7 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
[OVS_KEY_ATTR_DP_HASH] = { .len = sizeof(u32) },
[OVS_KEY_ATTR_TUNNEL] = { .len = OVS_ATTR_NESTED,
.next = ovs_tunnel_key_lens, },
- [OVS_KEY_ATTR_MPLS] = { .len = sizeof(struct ovs_key_mpls) },
+ [OVS_KEY_ATTR_MPLS] = { .len = OVS_ATTR_VARIABLE },
[OVS_KEY_ATTR_CT_STATE] = { .len = sizeof(u32) },
[OVS_KEY_ATTR_CT_ZONE] = { .len = sizeof(u16) },
[OVS_KEY_ATTR_CT_MARK] = { .len = sizeof(u32) },
@@ -1628,10 +1628,25 @@ static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
if (attrs & (1 << OVS_KEY_ATTR_MPLS)) {
const struct ovs_key_mpls *mpls_key;
+ u32 hdr_len;
+ u32 label_count, label_count_mask, i;
mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]);
- SW_FLOW_KEY_PUT(match, mpls.top_lse,
- mpls_key->mpls_lse, is_mask);
+ hdr_len = nla_len(a[OVS_KEY_ATTR_MPLS]);
+ label_count = hdr_len / sizeof(struct ovs_key_mpls);
+
+ if (label_count == 0 || label_count > MPLS_LABEL_DEPTH ||
+ hdr_len % sizeof(struct ovs_key_mpls))
+ return -EINVAL;
+
+ label_count_mask = GENMASK(label_count - 1, 0);
+
+ for (i = 0 ; i < label_count; i++)
+ SW_FLOW_KEY_PUT(match, mpls.lse[i],
+ mpls_key[i].mpls_lse, is_mask);
+
+ SW_FLOW_KEY_PUT(match, mpls.num_labels_mask,
+ label_count_mask, is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_MPLS);
}
@@ -2114,13 +2129,18 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
} else if (eth_p_mpls(swkey->eth.type)) {
+ u8 i, num_labels;
struct ovs_key_mpls *mpls_key;
- nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key));
+ num_labels = hweight_long(output->mpls.num_labels_mask);
+ nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS,
+ num_labels * sizeof(*mpls_key));
if (!nla)
goto nla_put_failure;
+
mpls_key = nla_data(nla);
- mpls_key->mpls_lse = output->mpls.top_lse;
+ for (i = 0; i < num_labels; i++)
+ mpls_key[i].mpls_lse = output->mpls.lse[i];
}
if ((swkey->eth.type == htons(ETH_P_IP) ||
@@ -2406,13 +2426,14 @@ static inline void add_nested_action_end(struct sw_flow_actions *sfa,
static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
- __be16 eth_type, __be16 vlan_tci, bool log);
+ __be16 eth_type, __be16 vlan_tci,
+ u32 mpls_label_count, bool log);
static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
- bool log, bool last)
+ u32 mpls_label_count, bool log, bool last)
{
const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
const struct nlattr *probability, *actions;
@@ -2463,7 +2484,7 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
return err;
err = __ovs_nla_copy_actions(net, actions, key, sfa,
- eth_type, vlan_tci, log);
+ eth_type, vlan_tci, mpls_label_count, log);
if (err)
return err;
@@ -2478,7 +2499,7 @@ static int validate_and_copy_clone(struct net *net,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
- bool log, bool last)
+ u32 mpls_label_count, bool log, bool last)
{
int start, err;
u32 exec;
@@ -2498,7 +2519,7 @@ static int validate_and_copy_clone(struct net *net,
return err;
err = __ovs_nla_copy_actions(net, attr, key, sfa,
- eth_type, vlan_tci, log);
+ eth_type, vlan_tci, mpls_label_count, log);
if (err)
return err;
@@ -2864,6 +2885,7 @@ static int validate_and_copy_check_pkt_len(struct net *net,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
+ u32 mpls_label_count,
bool log, bool last)
{
const struct nlattr *acts_if_greater, *acts_if_lesser_eq;
@@ -2912,7 +2934,7 @@ static int validate_and_copy_check_pkt_len(struct net *net,
return nested_acts_start;
err = __ovs_nla_copy_actions(net, acts_if_lesser_eq, key, sfa,
- eth_type, vlan_tci, log);
+ eth_type, vlan_tci, mpls_label_count, log);
if (err)
return err;
@@ -2925,7 +2947,7 @@ static int validate_and_copy_check_pkt_len(struct net *net,
return nested_acts_start;
err = __ovs_nla_copy_actions(net, acts_if_greater, key, sfa,
- eth_type, vlan_tci, log);
+ eth_type, vlan_tci, mpls_label_count, log);
if (err)
return err;
@@ -2952,7 +2974,8 @@ static int copy_action(const struct nlattr *from,
static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
- __be16 eth_type, __be16 vlan_tci, bool log)
+ __be16 eth_type, __be16 vlan_tci,
+ u32 mpls_label_count, bool log)
{
u8 mac_proto = ovs_key_mac_proto(key);
const struct nlattr *a;
@@ -3065,25 +3088,36 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
!eth_p_mpls(eth_type)))
return -EINVAL;
eth_type = mpls->mpls_ethertype;
+ mpls_label_count++;
break;
}
- case OVS_ACTION_ATTR_POP_MPLS:
+ case OVS_ACTION_ATTR_POP_MPLS: {
+ __be16 proto;
if (vlan_tci & htons(VLAN_CFI_MASK) ||
!eth_p_mpls(eth_type))
return -EINVAL;
- /* Disallow subsequent L2.5+ set and mpls_pop actions
- * as there is no check here to ensure that the new
- * eth_type is valid and thus set actions could
- * write off the end of the packet or otherwise
- * corrupt it.
+ /* Disallow subsequent L2.5+ set actions and mpls_pop
+ * actions once the last MPLS label in the packet is
+ * popped as there is no check here to ensure that
+ * the new eth type is valid and thus set actions could
+ * write off the end of the packet or otherwise corrupt
+ * it.
*
* Support for these actions is planned using packet
* recirculation.
*/
- eth_type = htons(0);
+ proto = nla_get_be16(a);
+ mpls_label_count--;
+
+ if (!eth_p_mpls(proto) || !mpls_label_count)
+ eth_type = htons(0);
+ else
+ eth_type = proto;
+
break;
+ }
case OVS_ACTION_ATTR_SET:
err = validate_set(a, key, sfa,
@@ -3106,6 +3140,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
err = validate_and_copy_sample(net, a, key, sfa,
eth_type, vlan_tci,
+ mpls_label_count,
log, last);
if (err)
return err;
@@ -3176,6 +3211,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
err = validate_and_copy_clone(net, a, key, sfa,
eth_type, vlan_tci,
+ mpls_label_count,
log, last);
if (err)
return err;
@@ -3188,8 +3224,9 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
err = validate_and_copy_check_pkt_len(net, a, key, sfa,
eth_type,
- vlan_tci, log,
- last);
+ vlan_tci,
+ mpls_label_count,
+ log, last);
if (err)
return err;
skip_copy = true;
@@ -3219,14 +3256,18 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
struct sw_flow_actions **sfa, bool log)
{
int err;
+ u32 mpls_label_count = 0;
*sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
if (IS_ERR(*sfa))
return PTR_ERR(*sfa);
+ if (eth_p_mpls(key->eth.type))
+ mpls_label_count = hweight_long(key->mpls.num_labels_mask);
+
(*sfa)->orig_len = nla_len(attr);
err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type,
- key->eth.vlan.tci, log);
+ key->eth.vlan.tci, mpls_label_count, log);
if (err)
ovs_nla_free_flow_actions(*sfa);
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 3fc38d16c456..5da9392b03d6 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -403,8 +403,9 @@ u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
ids = rcu_dereference(vport->upcall_portids);
- if (ids->n_ids == 1 && ids->ids[0] == 0)
- return 0;
+ /* If there is only one portid, select it in the fast-path. */
+ if (ids->n_ids == 1)
+ return ids->ids[0];
hash = skb_get_hash(skb);
ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
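
ids->rn_ids is a reciprocal_value precomputed when the portid array is
installed, so the per-packet modulo costs a multiply instead of a divide.
Usage sketch (linux/reciprocal_div.h; the example_ function is hypothetical):

	static u32 example_pick_index(u32 hash, u32 n_ids,
				      struct reciprocal_value rn)
	{
		/* rn = reciprocal_value(n_ids), computed once at setup */
		return hash - n_ids * reciprocal_divide(hash, rn); /* hash % n_ids */
	}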
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 82a50e850245..53c1d41fb1c9 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1295,15 +1295,21 @@ static void packet_sock_destruct(struct sock *sk)
static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
- u32 rxhash;
+ u32 *history = po->rollover->history;
+ u32 victim, rxhash;
int i, count = 0;
rxhash = skb_get_hash(skb);
for (i = 0; i < ROLLOVER_HLEN; i++)
- if (po->rollover->history[i] == rxhash)
+ if (READ_ONCE(history[i]) == rxhash)
count++;
- po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
+ victim = prandom_u32() % ROLLOVER_HLEN;
+
+ /* Avoid dirtying the cache line if possible */
+ if (READ_ONCE(history[victim]) != rxhash)
+ WRITE_ONCE(history[victim], rxhash);
+
return count > (ROLLOVER_HLEN >> 1);
}
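
READ_ONCE()/WRITE_ONCE() annotate the lockless accesses here, and the "don't rewrite an unchanged slot" test keeps a hot, shared cache line clean. A userspace model of the same update, with C11 relaxed atomics standing in for the kernel macros (an illustrative sketch, not the kernel code):

/* Sketch: the rollover-history update modeled in userspace. Skipping
 * the store when the victim slot already holds rxhash avoids dirtying
 * a cache line that other CPUs read concurrently.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define ROLLOVER_HLEN 16

static _Atomic uint32_t history[ROLLOVER_HLEN];

static bool flow_is_huge(uint32_t rxhash)
{
    int count = 0, victim;

    for (int i = 0; i < ROLLOVER_HLEN; i++)
        if (atomic_load_explicit(&history[i],
                                 memory_order_relaxed) == rxhash)
            count++;

    victim = rand() % ROLLOVER_HLEN;

    /* Avoid dirtying the cache line if possible */
    if (atomic_load_explicit(&history[victim],
                             memory_order_relaxed) != rxhash)
        atomic_store_explicit(&history[victim], rxhash,
                              memory_order_relaxed);

    return count > (ROLLOVER_HLEN >> 1);
}

int main(void)
{
    for (int i = 0; i < 100; i++)
        flow_is_huge(0x12345678);   /* one flow fills the history */
    return flow_is_huge(0x12345678) ? 0 : 1;
}
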
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 6b345c858dba..c71f4328d138 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -513,6 +513,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
struct ib_qp_init_attr attr;
struct ib_cq_init_attr cq_attr = {};
struct rds_ib_device *rds_ibdev;
+ unsigned long max_wrs;
int ret, fr_queue_space;
struct dma_pool *pool;
@@ -533,10 +534,15 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
/* add the conn now so that connection establishment has the dev */
rds_ib_add_conn(rds_ibdev, conn);
- if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
- rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
- if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
- rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);
+ max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_send_wr + 1 ?
+ rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_send_wr;
+ if (ic->i_send_ring.w_nr != max_wrs)
+ rds_ib_ring_resize(&ic->i_send_ring, max_wrs);
+
+ max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_recv_wr + 1 ?
+ rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_recv_wr;
+ if (ic->i_recv_ring.w_nr != max_wrs)
+ rds_ib_ring_resize(&ic->i_recv_ring, max_wrs);
/* Protection domain and memory range */
ic->i_pd = rds_ibdev->pd;
@@ -1176,8 +1182,9 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
ic->i_flowctl = 0;
atomic_set(&ic->i_credits, 0);
- rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
- rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);
+ /* Re-init rings, but retain sizes. */
+ rds_ib_ring_init(&ic->i_send_ring, ic->i_send_ring.w_nr);
+ rds_ib_ring_init(&ic->i_recv_ring, ic->i_recv_ring.w_nr);
if (ic->i_ibinc) {
rds_inc_put(&ic->i_ibinc->ii_inc);
@@ -1224,8 +1231,8 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
* rds_ib_conn_shutdown() waits for these to be emptied so they
* must be initialized before it can be called.
*/
- rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
- rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);
+ rds_ib_ring_init(&ic->i_send_ring, 0);
+ rds_ib_ring_init(&ic->i_recv_ring, 0);
ic->conn = conn;
conn->c_transport_data = ic;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 6a0df7c8a939..46b8ff24020d 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -906,7 +906,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
/* Now attach up the new socket */
skb->sk = NULL;
kfree_skb(skb);
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
out_release:
release_sock(sk);
@@ -1011,7 +1011,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
make_rose->va = 0;
make_rose->vr = 0;
make_rose->vl = 0;
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
rose_insert_socket(make);
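
rose (and sctp further down) switch from open-coded `sk->sk_ack_backlog--/++` to the sk_acceptq_removed()/sk_acceptq_added() helpers so the counter updates can carry their WRITE_ONCE() annotation in one place, pairing with the READ_ONCE() readers in em_meta and diag below. A userspace model of that shape (hypothetical struct; the *_ONCE macros spelled out as the volatile accesses they essentially expand to):

/* Sketch: one helper pair owns the lockless-store annotation, so every
 * call site is automatically safe against concurrent READ_ONCE() readers.
 */
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

struct sock_lite { unsigned int sk_ack_backlog; };  /* hypothetical */

static void sk_acceptq_added(struct sock_lite *sk)
{
    WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
}

static void sk_acceptq_removed(struct sock_lite *sk)
{
    WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
}

int main(void)
{
    struct sock_lite sk = { 0 };

    sk_acceptq_added(&sk);
    sk_acceptq_removed(&sk);
    return READ_ONCE(sk.sk_ack_backlog);    /* 0 */
}
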
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 6284c552e943..7fc1e2c1b656 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -188,6 +188,8 @@ static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
+ nla_total_size(0) /* TCA_ACT_STATS nested */
/* TCA_STATS_BASIC */
+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
+ /* TCA_STATS_PKT64 */
+ + nla_total_size_64bit(sizeof(u64))
/* TCA_STATS_QUEUE */
+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
+ nla_total_size(0) /* TCA_OPTIONS nested */
@@ -1001,7 +1003,6 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
err = PTR_ERR(act);
goto err;
}
- act->order = i;
sz += tcf_action_fill_size(act);
/* Start from index 0 */
actions[i - 1] = act;
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 88a1b79a1848..855a6fa16a62 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -206,9 +206,7 @@ static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a,
icmph = (void *)(skb_network_header(skb) + ihl);
- if ((icmph->type != ICMP_DEST_UNREACH) &&
- (icmph->type != ICMP_TIME_EXCEEDED) &&
- (icmph->type != ICMP_PARAMETERPROB))
+ if (!icmp_is_err(icmph->type))
break;
if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) +
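
The open-coded type triple collapses into icmp_is_err(). Quoted from memory (verify against include/linux/icmp.h in this series), the helper covers all five ICMP error types, so ICMP_SOURCE_QUENCH and ICMP_REDIRECT, which also embed the offending IP header, now take the same path:

/* From memory, the helper added alongside this cleanup; all five ICMP
 * error types carry the offending IP header in their payload.
 */
#include <linux/types.h>
#include <linux/icmp.h>

static inline bool icmp_is_err(int type)
{
    switch (type) {
    case ICMP_DEST_UNREACH:
    case ICMP_SOURCE_QUENCH:
    case ICMP_REDIRECT:
    case ICMP_TIME_EXCEEDED:
    case ICMP_PARAMETERPROB:
        return true;
    }

    return false;
}
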
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 97639b259cd7..9813ca4006dd 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -35,7 +35,7 @@ static int tcf_simp_act(struct sk_buff *skb, const struct tc_action *a,
* Example if this was the 3rd packet and the string was "hello"
* then it would look like "hello_3" (without quotes)
*/
- pr_info("simple: %s_%d\n",
+ pr_info("simple: %s_%llu\n",
(char *)d->tcfd_defdata, d->tcf_bstats.packets);
spin_unlock(&d->tcf_lock);
return d->tcf_action;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 8717c0b26c90..20d60b8fcb70 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
+#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
@@ -47,6 +48,62 @@ static LIST_HEAD(tcf_proto_base);
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);
+static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
+{
+ return jhash_3words(tp->chain->index, tp->prio,
+ (__force __u32)tp->protocol, 0);
+}
+
+static void tcf_proto_signal_destroying(struct tcf_chain *chain,
+ struct tcf_proto *tp)
+{
+ struct tcf_block *block = chain->block;
+
+ mutex_lock(&block->proto_destroy_lock);
+ hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
+ destroy_obj_hashfn(tp));
+ mutex_unlock(&block->proto_destroy_lock);
+}
+
+static bool tcf_proto_cmp(const struct tcf_proto *tp1,
+ const struct tcf_proto *tp2)
+{
+ return tp1->chain->index == tp2->chain->index &&
+ tp1->prio == tp2->prio &&
+ tp1->protocol == tp2->protocol;
+}
+
+static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
+ struct tcf_proto *tp)
+{
+ u32 hash = destroy_obj_hashfn(tp);
+ struct tcf_proto *iter;
+ bool found = false;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
+ destroy_ht_node, hash) {
+ if (tcf_proto_cmp(tp, iter)) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return found;
+}
+
+static void
+tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
+{
+ struct tcf_block *block = chain->block;
+
+ mutex_lock(&block->proto_destroy_lock);
+ if (hash_hashed(&tp->destroy_ht_node))
+ hash_del_rcu(&tp->destroy_ht_node);
+ mutex_unlock(&block->proto_destroy_lock);
+}
+
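
The three helpers above implement a small "obituary" table: a destroyer publishes the (chain index, prio, protocol) triple before the RCU-deferred teardown, and a concurrent inserter of an identical triple backs off (the -EAGAIN path further down) until tcf_proto_signal_destroyed() retires the entry. A single-slot userspace model of the protocol, ignoring hash collisions and the locking the kernel gets from the hashtable and proto_destroy_lock:

/* Sketch: publish-before-destroy, check-before-insert. Not kernel code;
 * one slot per bucket and no locking, just the protocol shape.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define HT_SIZE 16

struct tp_key { uint32_t chain, prio, protocol; bool live; };
static struct tp_key dying[HT_SIZE];

static unsigned int slot(const struct tp_key *k)
{
    /* stand-in for jhash_3words() */
    return (k->chain * 31 + k->prio * 17 + k->protocol) % HT_SIZE;
}

static void signal_destroying(const struct tp_key *k)
{
    dying[slot(k)] = *k;
    dying[slot(k)].live = true;
}

static void signal_destroyed(const struct tp_key *k)
{
    dying[slot(k)].live = false;
}

static int try_insert(const struct tp_key *k)
{
    const struct tp_key *d = &dying[slot(k)];

    if (d->live && d->chain == k->chain && d->prio == k->prio &&
        d->protocol == k->protocol)
        return -EAGAIN;     /* identical filter still dying */
    return 0;
}

int main(void)
{
    struct tp_key k = { .chain = 1, .prio = 100, .protocol = 0x0800 };

    signal_destroying(&k);
    if (try_insert(&k) != -EAGAIN)
        return 1;           /* must back off while dying */
    signal_destroyed(&k);
    return try_insert(&k);  /* 0: insert proceeds */
}
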
/* Find classifier type by string name */
static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
@@ -234,9 +291,11 @@ static void tcf_proto_get(struct tcf_proto *tp)
static void tcf_chain_put(struct tcf_chain *chain);
static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
- struct netlink_ext_ack *extack)
+ bool sig_destroy, struct netlink_ext_ack *extack)
{
tp->ops->destroy(tp, rtnl_held, extack);
+ if (sig_destroy)
+ tcf_proto_signal_destroyed(tp->chain, tp);
tcf_chain_put(tp->chain);
module_put(tp->ops->owner);
kfree_rcu(tp, rcu);
@@ -246,7 +305,7 @@ static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
struct netlink_ext_ack *extack)
{
if (refcount_dec_and_test(&tp->refcnt))
- tcf_proto_destroy(tp, rtnl_held, extack);
+ tcf_proto_destroy(tp, rtnl_held, true, extack);
}
static int walker_check_empty(struct tcf_proto *tp, void *fh,
@@ -370,6 +429,7 @@ static bool tcf_chain_detach(struct tcf_chain *chain)
static void tcf_block_destroy(struct tcf_block *block)
{
mutex_destroy(&block->lock);
+ mutex_destroy(&block->proto_destroy_lock);
kfree_rcu(block, rcu);
}
@@ -545,6 +605,12 @@ static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
mutex_lock(&chain->filter_chain_lock);
tp = tcf_chain_dereference(chain->filter_chain, chain);
+ while (tp) {
+ tp_next = rcu_dereference_protected(tp->next, 1);
+ tcf_proto_signal_destroying(chain, tp);
+ tp = tp_next;
+ }
+ tp = tcf_chain_dereference(chain->filter_chain, chain);
RCU_INIT_POINTER(chain->filter_chain, NULL);
tcf_chain0_head_change(chain, NULL);
chain->flushing = true;
@@ -844,6 +910,7 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
return ERR_PTR(-ENOMEM);
}
mutex_init(&block->lock);
+ mutex_init(&block->proto_destroy_lock);
init_rwsem(&block->cb_lock);
flow_block_init(&block->flow_block);
INIT_LIST_HEAD(&block->chain_list);
@@ -1621,6 +1688,12 @@ static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
mutex_lock(&chain->filter_chain_lock);
+ if (tcf_proto_exists_destroying(chain, tp_new)) {
+ mutex_unlock(&chain->filter_chain_lock);
+ tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
+ return ERR_PTR(-EAGAIN);
+ }
+
tp = tcf_chain_tp_find(chain, &chain_info,
protocol, prio, false);
if (!tp)
@@ -1628,10 +1701,10 @@ static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
mutex_unlock(&chain->filter_chain_lock);
if (tp) {
- tcf_proto_destroy(tp_new, rtnl_held, NULL);
+ tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
tp_new = tp;
} else if (err) {
- tcf_proto_destroy(tp_new, rtnl_held, NULL);
+ tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
tp_new = ERR_PTR(err);
}
@@ -1669,6 +1742,7 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
return;
}
+ tcf_proto_signal_destroying(chain, tp);
next = tcf_chain_dereference(chain_info.next, chain);
if (tp == chain->filter_chain)
tcf_chain0_head_change(chain, next);
@@ -2188,6 +2262,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
err = -EINVAL;
goto errout_locked;
} else if (t->tcm_handle == 0) {
+ tcf_proto_signal_destroying(chain, tp);
tcf_chain_tp_remove(chain, &chain_info, tp);
mutex_unlock(&chain->filter_chain_lock);
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 3177dcb17316..d99966a55c84 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -521,7 +521,7 @@ META_COLLECTOR(int_sk_ack_bl)
*err = -1;
return;
}
- dst->value = sk->sk_ack_backlog;
+ dst->value = READ_ONCE(sk->sk_ack_backlog);
}
META_COLLECTOR(int_sk_max_ack_bl)
@@ -532,7 +532,7 @@ META_COLLECTOR(int_sk_max_ack_bl)
*err = -1;
return;
}
- dst->value = sk->sk_max_ack_backlog;
+ dst->value = READ_ONCE(sk->sk_max_ack_backlog);
}
META_COLLECTOR(int_sk_prio)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8561e825f401..5ab696efca95 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -652,7 +652,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
if (likely(skb)) {
qdisc_update_stats_at_dequeue(qdisc, skb);
} else {
- qdisc->empty = true;
+ WRITE_ONCE(qdisc->empty, true);
}
return skb;
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index df98a887eb89..b0b0dc46af61 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -22,6 +22,7 @@
#define QUEUE_THRESHOLD 16384
#define DQCOUNT_INVALID -1
+#define DTIME_INVALID 0xffffffffffffffff
#define MAX_PROB 0xffffffffffffffff
#define PIE_SCALE 8
@@ -34,6 +35,7 @@ struct pie_params {
u32 beta; /* and are used for shift relative to 1 */
bool ecn; /* true if ecn is enabled */
bool bytemode; /* to scale drop early prob based on pkt size */
+ u8 dq_rate_estimator; /* to calculate delay using Little's law */
};
/* variables used */
@@ -77,11 +79,34 @@ static void pie_params_init(struct pie_params *params)
params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC); /* 15 ms */
params->ecn = false;
params->bytemode = false;
+ params->dq_rate_estimator = false;
+}
+
+/* private skb vars */
+struct pie_skb_cb {
+ psched_time_t enqueue_time;
+};
+
+static struct pie_skb_cb *get_pie_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct pie_skb_cb));
+ return (struct pie_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static psched_time_t pie_get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_pie_cb(skb)->enqueue_time;
+}
+
+static void pie_set_enqueue_time(struct sk_buff *skb)
+{
+ get_pie_cb(skb)->enqueue_time = psched_get_time();
}
static void pie_vars_init(struct pie_vars *vars)
{
vars->dq_count = DQCOUNT_INVALID;
+ vars->dq_tstamp = DTIME_INVALID;
vars->accu_prob = 0;
vars->avg_dq_rate = 0;
/* default of 150 ms in pschedtime */
@@ -172,6 +197,10 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* we can enqueue the packet */
if (enqueue) {
+ /* Set enqueue time only when dq_rate_estimator is disabled. */
+ if (!q->params.dq_rate_estimator)
+ pie_set_enqueue_time(skb);
+
q->stats.packets_in++;
if (qdisc_qlen(sch) > q->stats.maxq)
q->stats.maxq = qdisc_qlen(sch);
@@ -194,6 +223,7 @@ static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
[TCA_PIE_BETA] = {.type = NLA_U32},
[TCA_PIE_ECN] = {.type = NLA_U32},
[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
+ [TCA_PIE_DQ_RATE_ESTIMATOR] = {.type = NLA_U32},
};
static int pie_change(struct Qdisc *sch, struct nlattr *opt,
@@ -247,6 +277,10 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_PIE_BYTEMODE])
q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);
+ if (tb[TCA_PIE_DQ_RATE_ESTIMATOR])
+ q->params.dq_rate_estimator =
+ nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]);
+
/* Drop excess packets if new limit is lower */
qlen = sch->q.qlen;
while (sch->q.qlen > sch->limit) {
@@ -266,6 +300,28 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
{
struct pie_sched_data *q = qdisc_priv(sch);
int qlen = sch->qstats.backlog; /* current queue size in bytes */
+ psched_time_t now = psched_get_time();
+ u32 dtime = 0;
+
+ /* If dq_rate_estimator is disabled, calculate qdelay using the
+ * packet timestamp.
+ */
+ if (!q->params.dq_rate_estimator) {
+ q->vars.qdelay = now - pie_get_enqueue_time(skb);
+
+ if (q->vars.dq_tstamp != DTIME_INVALID)
+ dtime = now - q->vars.dq_tstamp;
+
+ q->vars.dq_tstamp = now;
+
+ if (qlen == 0)
+ q->vars.qdelay = 0;
+
+ if (dtime == 0)
+ return;
+
+ goto burst_allowance_reduction;
+ }
/* If current queue is about 10 packets or more and dq_count is unset
* we have enough packets to calculate the drain rate. Save
@@ -289,10 +345,10 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
q->vars.dq_count += skb->len;
if (q->vars.dq_count >= QUEUE_THRESHOLD) {
- psched_time_t now = psched_get_time();
- u32 dtime = now - q->vars.dq_tstamp;
u32 count = q->vars.dq_count << PIE_SCALE;
+ dtime = now - q->vars.dq_tstamp;
+
if (dtime == 0)
return;
@@ -317,14 +373,19 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
q->vars.dq_tstamp = psched_get_time();
}
- if (q->vars.burst_time > 0) {
- if (q->vars.burst_time > dtime)
- q->vars.burst_time -= dtime;
- else
- q->vars.burst_time = 0;
- }
+ goto burst_allowance_reduction;
}
}
+
+ return;
+
+burst_allowance_reduction:
+ if (q->vars.burst_time > 0) {
+ if (q->vars.burst_time > dtime)
+ q->vars.burst_time -= dtime;
+ else
+ q->vars.burst_time = 0;
+ }
}
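
pie_process_dequeue() now has two qdelay sources: the classic Little's-law estimate derived from a measured drain rate (qdelay = backlog / avg_dq_rate), and, when dq_rate_estimator is off, a direct per-packet timestamp (qdelay = now - enqueue_time). A compact sketch of the two estimators side by side, with hypothetical inputs in plain microsecond units:

/* Sketch: PIE's two queue-delay estimates. Timestamping measures delay
 * directly; the rate estimator infers it from backlog / drain rate.
 */
#include <stdint.h>
#include <stdio.h>

/* rate-based: Little's law, delay = queue length / service rate */
static uint64_t qdelay_rate(uint64_t backlog_bytes, uint64_t rate_bps)
{
    return rate_bps ? backlog_bytes * 8 * 1000000 / rate_bps : 0; /* us */
}

/* timestamp-based: delay experienced by the packet just dequeued */
static uint64_t qdelay_tstamp(uint64_t now_us, uint64_t enqueue_us)
{
    return now_us - enqueue_us;
}

int main(void)
{
    /* 150 KB backlog drained at 10 Mbit/s ~= 120 ms */
    printf("rate-based:  %llu us\n",
           (unsigned long long)qdelay_rate(150000, 10000000));
    /* a packet enqueued 120 ms ago */
    printf("timestamped: %llu us\n",
           (unsigned long long)qdelay_tstamp(1000000, 880000));
    return 0;
}
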
static void calculate_probability(struct Qdisc *sch)
@@ -332,19 +393,25 @@ static void calculate_probability(struct Qdisc *sch)
struct pie_sched_data *q = qdisc_priv(sch);
u32 qlen = sch->qstats.backlog; /* queue size in bytes */
psched_time_t qdelay = 0; /* in pschedtime */
- psched_time_t qdelay_old = q->vars.qdelay; /* in pschedtime */
+ psched_time_t qdelay_old = 0; /* in pschedtime */
s64 delta = 0; /* determines the change in probability */
u64 oldprob;
u64 alpha, beta;
u32 power;
bool update_prob = true;
- q->vars.qdelay_old = q->vars.qdelay;
+ if (q->params.dq_rate_estimator) {
+ qdelay_old = q->vars.qdelay;
+ q->vars.qdelay_old = q->vars.qdelay;
- if (q->vars.avg_dq_rate > 0)
- qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
- else
- qdelay = 0;
+ if (q->vars.avg_dq_rate > 0)
+ qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
+ else
+ qdelay = 0;
+ } else {
+ qdelay = q->vars.qdelay;
+ qdelay_old = q->vars.qdelay_old;
+ }
/* If qdelay is zero and qlen is not, it means qlen is very small, less
* than dequeue_rate, so we do not update probability in this round
@@ -430,14 +497,18 @@ static void calculate_probability(struct Qdisc *sch)
/* We restart the measurement cycle if the following conditions are met
* 1. If the delay has been low for 2 consecutive Tupdate periods
* 2. Calculated drop probability is zero
- * 3. We have atleast one estimate for the avg_dq_rate ie.,
- * is a non-zero value
+ * 3. If the dq_rate_estimator is enabled, we have at least one
+ * estimate for avg_dq_rate, i.e., it is a non-zero value
*/
if ((q->vars.qdelay < q->params.target / 2) &&
(q->vars.qdelay_old < q->params.target / 2) &&
q->vars.prob == 0 &&
- q->vars.avg_dq_rate > 0)
+ (!q->params.dq_rate_estimator || q->vars.avg_dq_rate > 0)) {
pie_vars_init(&q->vars);
+ }
+
+ if (!q->params.dq_rate_estimator)
+ q->vars.qdelay_old = qdelay;
}
static void pie_timer(struct timer_list *t)
@@ -497,7 +568,9 @@ static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
- nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode))
+ nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode) ||
+ nla_put_u32(skb, TCA_PIE_DQ_RATE_ESTIMATOR,
+ q->params.dq_rate_estimator))
goto nla_put_failure;
return nla_nest_end(skb, opts);
@@ -514,9 +587,6 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
.prob = q->vars.prob,
.delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
NSEC_PER_USEC,
- /* unscale and return dq_rate in bytes per sec */
- .avg_dq_rate = q->vars.avg_dq_rate *
- (PSCHED_TICKS_PER_SEC) >> PIE_SCALE,
.packets_in = q->stats.packets_in,
.overlimit = q->stats.overlimit,
.maxq = q->stats.maxq,
@@ -524,6 +594,14 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
.ecn_mark = q->stats.ecn_mark,
};
+ /* avg_dq_rate is only valid if dq_rate_estimator is enabled */
+ st.dq_rate_estimating = q->params.dq_rate_estimator;
+
+ /* unscale and return dq_rate in bytes per sec */
+ if (q->params.dq_rate_estimator)
+ st.avg_dq_rate = q->vars.avg_dq_rate *
+ (PSCHED_TICKS_PER_SEC) >> PIE_SCALE;
+
return gnet_stats_copy_app(d, &st, sizeof(st));
}
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 2121187229cd..7cd68628c637 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1224,8 +1224,6 @@ static int taprio_enable_offload(struct net_device *dev,
goto done;
}
- taprio_offload_config_changed(q);
-
done:
taprio_offload_free(offload);
@@ -1505,6 +1503,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
call_rcu(&admin->rcu, taprio_free_sched_cb);
spin_unlock_irqrestore(&q->current_entry_lock, flags);
+
+ if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
+ taprio_offload_config_changed(q);
}
new_admin = NULL;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 1ba893b85dad..8f8d18abd013 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -86,6 +86,8 @@ static struct sctp_association *sctp_association_init(
*/
asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
asoc->pf_retrans = sp->pf_retrans;
+ asoc->ps_retrans = sp->ps_retrans;
+ asoc->pf_expose = sp->pf_expose;
asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
@@ -324,7 +326,7 @@ void sctp_association_free(struct sctp_association *asoc)
* socket.
*/
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
}
/* Mark as dead, so other users can know this structure is
@@ -627,6 +629,8 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
/* And the partial failure retrans threshold */
peer->pf_retrans = asoc->pf_retrans;
+ /* And the primary path switchover retrans threshold */
+ peer->ps_retrans = asoc->ps_retrans;
/* Initialize the peer's SACK delay timeout based on the
* association configured value.
@@ -786,8 +790,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
enum sctp_transport_cmd command,
sctp_sn_error_t error)
{
+ int spc_state = SCTP_ADDR_AVAILABLE;
bool ulp_notify = true;
- int spc_state = 0;
/* Record the transition on the transport. */
switch (command) {
@@ -796,19 +800,13 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
* to heartbeat success, report the SCTP_ADDR_CONFIRMED
* state to the user, otherwise report SCTP_ADDR_AVAILABLE.
*/
- if (SCTP_UNCONFIRMED == transport->state &&
- SCTP_HEARTBEAT_SUCCESS == error)
- spc_state = SCTP_ADDR_CONFIRMED;
- else
- spc_state = SCTP_ADDR_AVAILABLE;
- /* Don't inform ULP about transition from PF to
- * active state and set cwnd to 1 MTU, see SCTP
- * Quick failover draft section 5.1, point 5
- */
- if (transport->state == SCTP_PF) {
+ if (transport->state == SCTP_PF &&
+ asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
ulp_notify = false;
- transport->cwnd = asoc->pathmtu;
- }
+ else if (transport->state == SCTP_UNCONFIRMED &&
+ error == SCTP_HEARTBEAT_SUCCESS)
+ spc_state = SCTP_ADDR_CONFIRMED;
+
transport->state = SCTP_ACTIVE;
break;
@@ -817,19 +815,21 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
* to inactive state. Also, release the cached route since
* there may be a better route next time.
*/
- if (transport->state != SCTP_UNCONFIRMED)
+ if (transport->state != SCTP_UNCONFIRMED) {
transport->state = SCTP_INACTIVE;
- else {
+ spc_state = SCTP_ADDR_UNREACHABLE;
+ } else {
sctp_transport_dst_release(transport);
ulp_notify = false;
}
-
- spc_state = SCTP_ADDR_UNREACHABLE;
break;
case SCTP_TRANSPORT_PF:
transport->state = SCTP_PF;
- ulp_notify = false;
+ if (asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
+ ulp_notify = false;
+ else
+ spc_state = SCTP_ADDR_POTENTIALLY_FAILED;
break;
default:
@@ -1073,7 +1073,7 @@ void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
/* Decrement the backlog value for a TCP-style socket. */
if (sctp_style(oldsk, TCP))
- oldsk->sk_ack_backlog--;
+ sk_acceptq_removed(oldsk);
/* Release references to the old endpoint and the sock. */
sctp_endpoint_put(assoc->ep);
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 0851166b9175..8a15146faaeb 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -425,8 +425,8 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
r->idiag_wqueue = infox->asoc->sndbuf_used;
} else {
- r->idiag_rqueue = sk->sk_ack_backlog;
- r->idiag_wqueue = sk->sk_max_ack_backlog;
+ r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
+ r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
}
if (infox->sctpinfo)
sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index ea53049d1db6..9d05b2e7bce2 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -164,7 +164,7 @@ void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
/* Increment the backlog value for a TCP-style listening socket. */
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
}
/* Free the endpoint structure. Delay cleanup until
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 08d14d86ecfb..fbbf19128c2d 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1217,9 +1217,15 @@ static int __net_init sctp_defaults_init(struct net *net)
/* Max.Burst - 4 */
net->sctp.max_burst = SCTP_DEFAULT_MAX_BURST;
+ /* Disable Primary Path Switchover by default */
+ net->sctp.ps_retrans = SCTP_PS_RETRANS_MAX;
+
/* Enable pf state by default */
net->sctp.pf_enable = 1;
+ /* Ignore pf exposure feature by default */
+ net->sctp.pf_expose = SCTP_PF_EXPOSE_UNSET;
+
/* Association.Max.Retrans - 10 attempts
* Path.Max.Retrans - 5 attempts (per destination address)
* Max.Init.Retransmits - 8 attempts
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index e52b2128e43b..acd737d4c0e0 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -567,6 +567,11 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
SCTP_FAILED_THRESHOLD);
}
+ if (transport->error_count > transport->ps_retrans &&
+ asoc->peer.primary_path == transport &&
+ asoc->peer.active_path != transport)
+ sctp_assoc_set_primary(asoc, asoc->peer.active_path);
+
/* E2) For the destination address for which the timer
* expires, set RTO <- RTO * 2 ("back off the timer"). The
* maximum value discussed in rule C7 above (RTO.max) may be
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ffd3262b7a41..83e4ca1fabda 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3943,18 +3943,22 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
*/
static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
char __user *optval,
- unsigned int optlen)
+ unsigned int optlen, bool v2)
{
- struct sctp_paddrthlds val;
+ struct sctp_paddrthlds_v2 val;
struct sctp_transport *trans;
struct sctp_association *asoc;
+ int len;
- if (optlen < sizeof(struct sctp_paddrthlds))
+ len = v2 ? sizeof(val) : sizeof(struct sctp_paddrthlds);
+ if (optlen < len)
return -EINVAL;
- if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
- sizeof(struct sctp_paddrthlds)))
+ if (copy_from_user(&val, optval, len))
return -EFAULT;
+ if (v2 && val.spt_pathpfthld > val.spt_pathcpthld)
+ return -EINVAL;
+
if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
trans = sctp_addr_id2transport(sk, &val.spt_address,
val.spt_assoc_id);
@@ -3963,6 +3967,8 @@ static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
if (val.spt_pathmaxrxt)
trans->pathmaxrxt = val.spt_pathmaxrxt;
+ if (v2)
+ trans->ps_retrans = val.spt_pathcpthld;
trans->pf_retrans = val.spt_pathpfthld;
return 0;
@@ -3978,17 +3984,23 @@ static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
transports) {
if (val.spt_pathmaxrxt)
trans->pathmaxrxt = val.spt_pathmaxrxt;
+ if (v2)
+ trans->ps_retrans = val.spt_pathcpthld;
trans->pf_retrans = val.spt_pathpfthld;
}
if (val.spt_pathmaxrxt)
asoc->pathmaxrxt = val.spt_pathmaxrxt;
+ if (v2)
+ asoc->ps_retrans = val.spt_pathcpthld;
asoc->pf_retrans = val.spt_pathpfthld;
} else {
struct sctp_sock *sp = sctp_sk(sk);
if (val.spt_pathmaxrxt)
sp->pathmaxrxt = val.spt_pathmaxrxt;
+ if (v2)
+ sp->ps_retrans = val.spt_pathcpthld;
sp->pf_retrans = val.spt_pathpfthld;
}
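
From userspace, the v2 option takes a struct with one extra field for the primary-switchover threshold. A hedged sketch of setting it (field and option names as added by this series; verify against the installed uapi headers; note the kernel rejects spt_pathpfthld > spt_pathcpthld, per the check above):

/* Sketch (userspace): raise the thresholds on all paths of an assoc.
 * Leaving spt_address zeroed selects the association/endpoint-wide
 * branch rather than a single transport.
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int set_thresholds(int fd, sctp_assoc_t assoc_id)
{
    struct sctp_paddrthlds_v2 th;

    memset(&th, 0, sizeof(th));
    th.spt_assoc_id = assoc_id;
    th.spt_pathmaxrxt = 5;  /* Path.Max.Retrans */
    th.spt_pathpfthld = 2;  /* enter Potentially Failed after 2 */
    th.spt_pathcpthld = 4;  /* switch primary after 4 (>= pfthld) */

    return setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS_V2,
                      &th, sizeof(th));
}
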
@@ -4589,6 +4601,40 @@ out:
return retval;
}
+static int sctp_setsockopt_pf_expose(struct sock *sk,
+ char __user *optval,
+ unsigned int optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EINVAL;
+
+ if (optlen != sizeof(params))
+ goto out;
+
+ if (copy_from_user(&params, optval, optlen)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ if (params.assoc_value > SCTP_PF_EXPOSE_MAX)
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ goto out;
+
+ if (asoc)
+ asoc->pf_expose = params.assoc_value;
+ else
+ sctp_sk(sk)->pf_expose = params.assoc_value;
+ retval = 0;
+
+out:
+ return retval;
+}
+
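
The matching userspace knob is a plain struct sctp_assoc_value. A sketch enabling exposure so address-change events can report the potentially-failed state (constant names from this series; hedged the same way as the threshold example above):

/* Sketch (userspace): opt in to seeing the PF state for future assocs. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int expose_pf_state(int fd)
{
    struct sctp_assoc_value av;

    memset(&av, 0, sizeof(av));
    av.assoc_id = SCTP_FUTURE_ASSOC;
    av.assoc_value = SCTP_PF_EXPOSE_ENABLE;

    return setsockopt(fd, IPPROTO_SCTP,
                      SCTP_EXPOSE_POTENTIALLY_FAILED_STATE,
                      &av, sizeof(av));
}
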
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
@@ -4744,7 +4790,12 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
break;
case SCTP_PEER_ADDR_THLDS:
- retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
+ retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen,
+ false);
+ break;
+ case SCTP_PEER_ADDR_THLDS_V2:
+ retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen,
+ true);
break;
case SCTP_RECVRCVINFO:
retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen);
@@ -4798,6 +4849,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
case SCTP_ECN_SUPPORTED:
retval = sctp_setsockopt_ecn_supported(sk, optval, optlen);
break;
+ case SCTP_EXPOSE_POTENTIALLY_FAILED_STATE:
+ retval = sctp_setsockopt_pf_expose(sk, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -5041,6 +5095,8 @@ static int sctp_init_sock(struct sock *sk)
sp->hbinterval = net->sctp.hb_interval;
sp->pathmaxrxt = net->sctp.max_retrans_path;
sp->pf_retrans = net->sctp.pf_retrans;
+ sp->ps_retrans = net->sctp.ps_retrans;
+ sp->pf_expose = net->sctp.pf_expose;
sp->pathmtu = 0; /* allow default discovery */
sp->sackdelay = net->sctp.sack_timeout;
sp->sackfreq = 2;
@@ -5521,8 +5577,16 @@ static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
pinfo.spinfo_assoc_id);
- if (!transport)
- return -EINVAL;
+ if (!transport) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (transport->state == SCTP_PF &&
+ transport->asoc->pf_expose == SCTP_PF_EXPOSE_DISABLE) {
+ retval = -EACCES;
+ goto out;
+ }
pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
pinfo.spinfo_state = transport->state;
@@ -7170,18 +7234,19 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
* http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
*/
static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
- char __user *optval,
- int len,
- int __user *optlen)
+ char __user *optval, int len,
+ int __user *optlen, bool v2)
{
- struct sctp_paddrthlds val;
+ struct sctp_paddrthlds_v2 val;
struct sctp_transport *trans;
struct sctp_association *asoc;
+ int min;
- if (len < sizeof(struct sctp_paddrthlds))
+ min = v2 ? sizeof(val) : sizeof(struct sctp_paddrthlds);
+ if (len < min)
return -EINVAL;
- len = sizeof(struct sctp_paddrthlds);
- if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
+ len = min;
+ if (copy_from_user(&val, optval, len))
return -EFAULT;
if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
@@ -7192,6 +7257,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
val.spt_pathmaxrxt = trans->pathmaxrxt;
val.spt_pathpfthld = trans->pf_retrans;
+ val.spt_pathcpthld = trans->ps_retrans;
goto out;
}
@@ -7204,11 +7270,13 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
if (asoc) {
val.spt_pathpfthld = asoc->pf_retrans;
val.spt_pathmaxrxt = asoc->pathmaxrxt;
+ val.spt_pathcpthld = asoc->ps_retrans;
} else {
struct sctp_sock *sp = sctp_sk(sk);
val.spt_pathpfthld = sp->pf_retrans;
val.spt_pathmaxrxt = sp->pathmaxrxt;
+ val.spt_pathcpthld = sp->ps_retrans;
}
out:
@@ -7900,6 +7968,45 @@ out:
return retval;
}
+static int sctp_getsockopt_pf_expose(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ params.assoc_value = asoc ? asoc->pf_expose
+ : sctp_sk(sk)->pf_expose;
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &params, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
static int sctp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -8049,7 +8156,12 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
break;
case SCTP_PEER_ADDR_THLDS:
- retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
+ retval = sctp_getsockopt_paddr_thresholds(sk, optval, len,
+ optlen, false);
+ break;
+ case SCTP_PEER_ADDR_THLDS_V2:
+ retval = sctp_getsockopt_paddr_thresholds(sk, optval, len,
+ optlen, true);
break;
case SCTP_GET_ASSOC_STATS:
retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
@@ -8112,6 +8224,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
case SCTP_ECN_SUPPORTED:
retval = sctp_getsockopt_ecn_supported(sk, len, optval, optlen);
break;
+ case SCTP_EXPOSE_POTENTIALLY_FAILED_STATE:
+ retval = sctp_getsockopt_pf_expose(sk, len, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -8376,7 +8491,7 @@ static int sctp_listen_start(struct sock *sk, int backlog)
}
}
- sk->sk_max_ack_backlog = backlog;
+ WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
return sctp_hash_endpoint(ep);
}
@@ -8430,7 +8545,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
/* If we are already listening, just update the backlog */
if (sctp_sstate(sk, LISTENING))
- sk->sk_max_ack_backlog = backlog;
+ WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
else {
err = sctp_listen_start(sk, backlog);
if (err)
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 238cf1737576..4740aa70e652 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -34,6 +34,8 @@ static int rto_alpha_min = 0;
static int rto_beta_min = 0;
static int rto_alpha_max = 1000;
static int rto_beta_max = 1000;
+static int pf_expose_max = SCTP_PF_EXPOSE_MAX;
+static int ps_retrans_max = SCTP_PS_RETRANS_MAX;
static unsigned long max_autoclose_min = 0;
static unsigned long max_autoclose_max =
@@ -212,7 +214,16 @@ static struct ctl_table sctp_net_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
+ .extra2 = &init_net.sctp.ps_retrans,
+ },
+ {
+ .procname = "ps_retrans",
+ .data = &init_net.sctp.ps_retrans,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &init_net.sctp.pf_retrans,
+ .extra2 = &ps_retrans_max,
},
{
.procname = "sndbuf_policy",
@@ -318,6 +329,15 @@ static struct ctl_table sctp_net_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "pf_expose",
+ .data = &init_net.sctp.pf_expose,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &pf_expose_max,
+ },
{ /* sentinel */ }
};
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index b7d9fd285c71..b997072c72e5 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -25,6 +25,7 @@
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
+#include <linux/rcupdate_wait.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -798,6 +799,7 @@ static void smc_connect_work(struct work_struct *work)
smc->sk.sk_err = EPIPE;
else if (signal_pending(current))
smc->sk.sk_err = -sock_intr_errno(timeo);
+ sock_put(&smc->sk); /* passive closing */
goto out;
}
@@ -1735,7 +1737,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
case TCP_FASTOPEN_KEY:
case TCP_FASTOPEN_NO_COOKIE:
/* option not supported by SMC */
- if (sk->sk_state == SMC_INIT) {
+ if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
smc_switch_to_fallback(smc);
smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
} else {
@@ -2038,22 +2040,28 @@ static int __init smc_init(void)
if (rc)
goto out_pernet_subsys;
+ rc = smc_core_init();
+ if (rc) {
+ pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
+ goto out_pnet;
+ }
+
rc = smc_llc_init();
if (rc) {
pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
- goto out_pnet;
+ goto out_core;
}
rc = smc_cdc_init();
if (rc) {
pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
- goto out_pnet;
+ goto out_core;
}
rc = proto_register(&smc_proto, 1);
if (rc) {
pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
- goto out_pnet;
+ goto out_core;
}
rc = proto_register(&smc_proto6, 1);
@@ -2085,6 +2093,8 @@ out_proto6:
proto_unregister(&smc_proto6);
out_proto:
proto_unregister(&smc_proto);
+out_core:
+ smc_core_exit();
out_pnet:
smc_pnet_exit();
out_pernet_subsys:
@@ -2095,14 +2105,15 @@ out_pernet_subsys:
static void __exit smc_exit(void)
{
- smc_core_exit();
static_branch_disable(&tcp_have_smc);
- smc_ib_unregister_client();
sock_unregister(PF_SMC);
+ smc_core_exit();
+ smc_ib_unregister_client();
proto_unregister(&smc_proto6);
proto_unregister(&smc_proto);
smc_pnet_exit();
unregister_pernet_subsys(&smc_net_ops);
+ rcu_barrier();
}
module_init(smc_init);
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 7dc07ec2379b..164f1584861b 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -131,6 +131,9 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
int rc;
+ if (!conn->lgr || (conn->lgr->is_smcd && conn->lgr->peer_shutdown))
+ return -EPIPE;
+
if (conn->lgr->is_smcd) {
spin_lock_bh(&conn->send_lock);
rc = smcd_cdc_msg_send(conn);
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 49bcebff6378..0879f7bed967 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -349,7 +349,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
smc->conn.lgr->sync_err = 1;
- smc_lgr_terminate(smc->conn.lgr);
+ smc_lgr_terminate(smc->conn.lgr, true);
}
}
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index d34e5adce2eb..290270c821ca 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -20,8 +20,6 @@
#include "smc_cdc.h"
#include "smc_close.h"
-#define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME (5 * HZ)
-
/* release the clcsock that is assigned to the smc_sock */
void smc_clcsock_release(struct smc_sock *smc)
{
@@ -110,6 +108,17 @@ int smc_close_abort(struct smc_connection *conn)
return smc_cdc_get_slot_and_msg_send(conn);
}
+static void smc_close_cancel_work(struct smc_sock *smc)
+{
+ struct sock *sk = &smc->sk;
+
+ release_sock(sk);
+ cancel_work_sync(&smc->conn.close_work);
+ cancel_delayed_work_sync(&smc->conn.tx_work);
+ lock_sock(sk);
+ sk->sk_state = SMC_CLOSED;
+}
+
/* terminate smc socket abnormally - active abort
* link group is terminated, i.e. RDMA communication no longer possible
*/
@@ -126,23 +135,21 @@ void smc_close_active_abort(struct smc_sock *smc)
switch (sk->sk_state) {
case SMC_ACTIVE:
sk->sk_state = SMC_PEERABORTWAIT;
- release_sock(sk);
- cancel_delayed_work_sync(&smc->conn.tx_work);
- lock_sock(sk);
+ smc_close_cancel_work(smc);
sk->sk_state = SMC_CLOSED;
sock_put(sk); /* passive closing */
break;
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
- release_sock(sk);
- cancel_delayed_work_sync(&smc->conn.tx_work);
- lock_sock(sk);
+ smc_close_cancel_work(smc);
sk->sk_state = SMC_CLOSED;
sock_put(sk); /* postponed passive closing */
break;
case SMC_PEERCLOSEWAIT1:
case SMC_PEERCLOSEWAIT2:
case SMC_PEERFINCLOSEWAIT:
+ sk->sk_state = SMC_PEERABORTWAIT;
+ smc_close_cancel_work(smc);
sk->sk_state = SMC_CLOSED;
smc_conn_free(&smc->conn);
release_clcsock = true;
@@ -150,7 +157,11 @@ void smc_close_active_abort(struct smc_sock *smc)
break;
case SMC_PROCESSABORT:
case SMC_APPFINCLOSEWAIT:
+ sk->sk_state = SMC_PEERABORTWAIT;
+ smc_close_cancel_work(smc);
sk->sk_state = SMC_CLOSED;
+ smc_conn_free(&smc->conn);
+ release_clcsock = true;
break;
case SMC_INIT:
case SMC_PEERABORTWAIT:
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 0d92456729ab..bb92c7c6214c 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -13,6 +13,8 @@
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/reboot.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
@@ -39,6 +41,9 @@ static struct smc_lgr_list smc_lgr_list = { /* established link groups */
.num = 0,
};
+static atomic_t lgr_cnt; /* number of existing link groups */
+static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);
+
static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
struct smc_buf_desc *buf_desc);
@@ -161,10 +166,10 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
* of the DELETE LINK sequence from server; or as server to
* initiate the delete processing. See smc_llc_rx_delete_link().
*/
-static int smc_link_send_delete(struct smc_link *lnk)
+static int smc_link_send_delete(struct smc_link *lnk, bool orderly)
{
if (lnk->state == SMC_LNK_ACTIVE &&
- !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, true)) {
+ !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, orderly)) {
smc_llc_link_deleting(lnk);
return 0;
}
@@ -201,7 +206,7 @@ static void smc_lgr_free_work(struct work_struct *work)
if (!lgr->is_smcd && !lgr->terminating) {
/* try to send del link msg, on error free lgr immediately */
if (lnk->state == SMC_LNK_ACTIVE &&
- !smc_link_send_delete(lnk)) {
+ !smc_link_send_delete(lnk, true)) {
/* reschedule in case we never receive a response */
smc_lgr_schedule_free_work(lgr);
spin_unlock_bh(lgr_lock);
@@ -214,7 +219,7 @@ static void smc_lgr_free_work(struct work_struct *work)
if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
smc_llc_link_inactive(lnk);
- if (lgr->is_smcd)
+ if (lgr->is_smcd && !lgr->terminating)
smc_ism_signal_shutdown(lgr);
smc_lgr_free(lgr);
}
@@ -224,7 +229,7 @@ static void smc_lgr_terminate_work(struct work_struct *work)
struct smc_link_group *lgr = container_of(work, struct smc_link_group,
terminate_work);
- smc_lgr_terminate(lgr);
+ smc_lgr_terminate(lgr, true);
}
/* create a new SMC link group */
@@ -275,6 +280,8 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
lgr->smcd = ini->ism_dev;
lgr_list = &ini->ism_dev->lgr_list;
lgr_lock = &lgr->smcd->lgr_lock;
+ lgr->peer_shutdown = 0;
+ atomic_inc(&ini->ism_dev->lgr_cnt);
} else {
/* SMC-R specific settings */
get_device(&ini->ib_dev->ibdev->dev);
@@ -317,6 +324,8 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
rc = smc_wr_create_link(lnk);
if (rc)
goto destroy_qp;
+ atomic_inc(&lgr_cnt);
+ atomic_inc(&ini->ib_dev->lnk_cnt);
}
smc->conn.lgr = lgr;
spin_lock_bh(lgr_lock);
@@ -380,7 +389,8 @@ void smc_conn_free(struct smc_connection *conn)
if (!lgr)
return;
if (lgr->is_smcd) {
- smc_ism_unset_conn(conn);
+ if (!list_empty(&lgr->list))
+ smc_ism_unset_conn(conn);
tasklet_kill(&conn->rx_tsklet);
} else {
smc_cdc_tx_dismiss_slots(conn);
@@ -403,6 +413,8 @@ static void smc_link_clear(struct smc_link *lnk)
smc_ib_destroy_queue_pair(lnk);
smc_ib_dealloc_protection_domain(lnk);
smc_wr_free_link_mem(lnk);
+ if (!atomic_dec_return(&lnk->smcibdev->lnk_cnt))
+ wake_up(&lnk->smcibdev->lnks_deleted);
}
static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
@@ -480,11 +492,17 @@ static void smc_lgr_free(struct smc_link_group *lgr)
{
smc_lgr_free_bufs(lgr);
if (lgr->is_smcd) {
- smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
- put_device(&lgr->smcd->dev);
+ if (!lgr->terminating) {
+ smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
+ put_device(&lgr->smcd->dev);
+ }
+ if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
+ wake_up(&lgr->smcd->lgrs_deleted);
} else {
smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev);
+ if (!atomic_dec_return(&lgr_cnt))
+ wake_up(&lgrs_deleted);
}
kfree(lgr);
}
@@ -502,6 +520,20 @@ void smc_lgr_forget(struct smc_link_group *lgr)
spin_unlock_bh(lgr_lock);
}
+static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
+{
+ int i;
+
+ for (i = 0; i < SMC_RMBE_SIZES; i++) {
+ struct smc_buf_desc *buf_desc;
+
+ list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
+ buf_desc->len += sizeof(struct smcd_cdc_msg);
+ smc_ism_unregister_dmb(lgr->smcd, buf_desc);
+ }
+ }
+}
+
static void smc_sk_wake_ups(struct smc_sock *smc)
{
smc->sk.sk_write_space(&smc->sk);
@@ -510,20 +542,50 @@ static void smc_sk_wake_ups(struct smc_sock *smc)
}
/* kill a connection */
-static void smc_conn_kill(struct smc_connection *conn)
+static void smc_conn_kill(struct smc_connection *conn, bool soft)
{
struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
- smc_close_abort(conn);
+ if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
+ conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+ else
+ smc_close_abort(conn);
conn->killed = 1;
+ smc->sk.sk_err = ECONNABORTED;
smc_sk_wake_ups(smc);
+ if (conn->lgr->is_smcd) {
+ smc_ism_unset_conn(conn);
+ if (soft)
+ tasklet_kill(&conn->rx_tsklet);
+ else
+ tasklet_unlock_wait(&conn->rx_tsklet);
+ } else {
+ smc_cdc_tx_dismiss_slots(conn);
+ }
smc_lgr_unregister_conn(conn);
- smc->sk.sk_err = ECONNABORTED;
smc_close_active_abort(smc);
}
+static void smc_lgr_cleanup(struct smc_link_group *lgr)
+{
+ if (lgr->is_smcd) {
+ smc_ism_signal_shutdown(lgr);
+ smcd_unregister_all_dmbs(lgr);
+ smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
+ put_device(&lgr->smcd->dev);
+ } else {
+ struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+
+ wake_up(&lnk->wr_reg_wait);
+ if (lnk->state != SMC_LNK_INACTIVE) {
+ smc_link_send_delete(lnk, false);
+ smc_llc_link_inactive(lnk);
+ }
+ }
+}
+
/* terminate link group */
-static void __smc_lgr_terminate(struct smc_link_group *lgr)
+static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
struct smc_connection *conn;
struct smc_sock *smc;
@@ -531,6 +593,8 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr)
if (lgr->terminating)
return; /* lgr already terminating */
+ if (!soft)
+ cancel_delayed_work_sync(&lgr->free_work);
lgr->terminating = 1;
if (!lgr->is_smcd)
smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
@@ -544,20 +608,25 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr)
smc = container_of(conn, struct smc_sock, conn);
sock_hold(&smc->sk); /* sock_put below */
lock_sock(&smc->sk);
- smc_conn_kill(conn);
+ smc_conn_kill(conn, soft);
release_sock(&smc->sk);
sock_put(&smc->sk); /* sock_hold above */
read_lock_bh(&lgr->conns_lock);
node = rb_first(&lgr->conns_all);
}
read_unlock_bh(&lgr->conns_lock);
- if (!lgr->is_smcd)
- wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
- smc_lgr_schedule_free_work_fast(lgr);
+ smc_lgr_cleanup(lgr);
+ if (soft)
+ smc_lgr_schedule_free_work_fast(lgr);
+ else
+ smc_lgr_free(lgr);
}
-/* unlink and terminate link group */
-void smc_lgr_terminate(struct smc_link_group *lgr)
+/* unlink and terminate link group
+ * @soft: true if link group shutdown can take its time
+ * false if immediate link group shutdown is required
+ */
+void smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
spinlock_t *lgr_lock;
@@ -567,9 +636,11 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
spin_unlock_bh(lgr_lock);
return; /* lgr already terminating */
}
+ if (!soft)
+ lgr->freeing = 1;
list_del_init(&lgr->list);
spin_unlock_bh(lgr_lock);
- __smc_lgr_terminate(lgr);
+ __smc_lgr_terminate(lgr, soft);
}
/* Called when IB port is terminated */
@@ -582,18 +653,20 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
if (!lgr->is_smcd &&
lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
- lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
+ lgr->lnk[SMC_SINGLE_LINK].ibport == ibport) {
list_move(&lgr->list, &lgr_free_list);
+ lgr->freeing = 1;
+ }
}
spin_unlock_bh(&smc_lgr_list.lock);
list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
list_del_init(&lgr->list);
- __smc_lgr_terminate(lgr);
+ __smc_lgr_terminate(lgr, false);
}
}
-/* Called when SMC-D device is terminated or peer is lost */
+/* Called when peer lgr shutdown (normal or abnormal) is received */
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
struct smc_link_group *lgr, *l;
@@ -604,6 +677,8 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
if ((!peer_gid || lgr->peer_gid == peer_gid) &&
(vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
+ if (peer_gid) /* peer triggered termination */
+ lgr->peer_shutdown = 1;
list_move(&lgr->list, &lgr_free_list);
}
}
@@ -612,11 +687,67 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
/* cancel the regular free workers and actually free lgrs */
list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
list_del_init(&lgr->list);
- __smc_lgr_terminate(lgr);
- cancel_delayed_work_sync(&lgr->free_work);
- if (!peer_gid && vlan == VLAN_VID_MASK) /* dev terminated? */
- smc_ism_signal_shutdown(lgr);
- smc_lgr_free(lgr);
+ schedule_work(&lgr->terminate_work);
+ }
+}
+
+/* Called when an SMCD device is removed or the smc module is unloaded */
+void smc_smcd_terminate_all(struct smcd_dev *smcd)
+{
+ struct smc_link_group *lgr, *lg;
+ LIST_HEAD(lgr_free_list);
+
+ spin_lock_bh(&smcd->lgr_lock);
+ list_splice_init(&smcd->lgr_list, &lgr_free_list);
+ list_for_each_entry(lgr, &lgr_free_list, list)
+ lgr->freeing = 1;
+ spin_unlock_bh(&smcd->lgr_lock);
+
+ list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
+ list_del_init(&lgr->list);
+ __smc_lgr_terminate(lgr, false);
+ }
+
+ if (atomic_read(&smcd->lgr_cnt))
+ wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
+}
+
+/* Called when an SMCR device is removed or the smc module is unloaded.
+ * If smcibdev is given, all SMCR link groups using this device are terminated.
+ * If smcibdev is NULL, all SMCR link groups are terminated.
+ */
+void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
+{
+ struct smc_link_group *lgr, *lg;
+ LIST_HEAD(lgr_free_list);
+
+ spin_lock_bh(&smc_lgr_list.lock);
+ if (!smcibdev) {
+ list_splice_init(&smc_lgr_list.list, &lgr_free_list);
+ list_for_each_entry(lgr, &lgr_free_list, list)
+ lgr->freeing = 1;
+ } else {
+ list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
+ if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev) {
+ list_move(&lgr->list, &lgr_free_list);
+ lgr->freeing = 1;
+ }
+ }
+ }
+ spin_unlock_bh(&smc_lgr_list.lock);
+
+ list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
+ list_del_init(&lgr->list);
+ __smc_lgr_terminate(lgr, false);
+ }
+
+ if (smcibdev) {
+ if (atomic_read(&smcibdev->lnk_cnt))
+ wait_event(smcibdev->lnks_deleted,
+ !atomic_read(&smcibdev->lnk_cnt));
+ } else {
+ if (atomic_read(&lgr_cnt))
+ wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
}
}
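
The lgr_cnt/lnk_cnt pattern here — bump a counter at create, drop it at free, and let the terminator sleep until it reaches zero — is what makes "terminate_all then return" safe against in-flight frees. A userspace model with a mutex and condition variable standing in for the kernel's atomic_t plus wait_event()/wake_up():

/* Sketch: wait-for-zero teardown. atomic_dec_return() + wake_up() on
 * the kernel side becomes a condvar broadcast here.
 */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_deleted = PTHREAD_COND_INITIALIZER;
static int lgr_cnt;

static void lgr_created(void)
{
    pthread_mutex_lock(&lock);
    lgr_cnt++;
    pthread_mutex_unlock(&lock);
}

static void lgr_freed(void)
{
    pthread_mutex_lock(&lock);
    if (--lgr_cnt == 0)
        pthread_cond_broadcast(&all_deleted);
    pthread_mutex_unlock(&lock);
}

static void terminate_all_and_wait(void)
{
    /* termination of every group would be kicked off here */
    pthread_mutex_lock(&lock);
    while (lgr_cnt)
        pthread_cond_wait(&all_deleted, &lock);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    lgr_created();
    lgr_freed();
    terminate_all_and_wait();   /* returns at once: count is zero */
    return 0;
}
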
@@ -1137,37 +1268,42 @@ static void smc_core_going_away(void)
spin_unlock(&smcd_dev_list.lock);
}
-/* Called (from smc_exit) when module is removed */
-void smc_core_exit(void)
+/* Clean up all SMC link groups */
+static void smc_lgrs_shutdown(void)
{
- struct smc_link_group *lgr, *lg;
- LIST_HEAD(lgr_freeing_list);
struct smcd_dev *smcd;
smc_core_going_away();
- spin_lock_bh(&smc_lgr_list.lock);
- list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
- spin_unlock_bh(&smc_lgr_list.lock);
+ smc_smcr_terminate_all(NULL);
spin_lock(&smcd_dev_list.lock);
list_for_each_entry(smcd, &smcd_dev_list.list, list)
- list_splice_init(&smcd->lgr_list, &lgr_freeing_list);
+ smc_smcd_terminate_all(smcd);
spin_unlock(&smcd_dev_list.lock);
+}
- list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
- list_del_init(&lgr->list);
- if (!lgr->is_smcd) {
- struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+static int smc_core_reboot_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ smc_lgrs_shutdown();
- if (lnk->state == SMC_LNK_ACTIVE)
- smc_llc_send_delete_link(lnk, SMC_LLC_REQ,
- false);
- smc_llc_link_inactive(lnk);
- }
- cancel_delayed_work_sync(&lgr->free_work);
- if (lgr->is_smcd)
- smc_ism_signal_shutdown(lgr);
- smc_lgr_free(lgr); /* free link group */
- }
+ return 0;
+}
+
+static struct notifier_block smc_reboot_notifier = {
+ .notifier_call = smc_core_reboot_event,
+};
+
+int __init smc_core_init(void)
+{
+ atomic_set(&lgr_cnt, 0);
+ return register_reboot_notifier(&smc_reboot_notifier);
+}
+
+/* Called (from smc_exit) when module is removed */
+void smc_core_exit(void)
+{
+ unregister_reboot_notifier(&smc_reboot_notifier);
+ smc_lgrs_shutdown();
}
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index e6fd1ed42064..c472e12951d1 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -228,6 +228,8 @@ struct smc_link_group {
/* Peer GID (remote) */
struct smcd_dev *smcd;
/* ISM device for VLAN reg. */
+ u8 peer_shutdown : 1;
+ /* peer triggered shutdown */
};
};
};
@@ -285,7 +287,7 @@ static inline struct smc_connection *smc_lgr_find_conn(
static inline void smc_lgr_terminate_sched(struct smc_link_group *lgr)
{
- if (!lgr->terminating)
+ if (!lgr->terminating && !lgr->freeing)
schedule_work(&lgr->terminate_work);
}
@@ -294,10 +296,12 @@ struct smc_clc_msg_accept_confirm;
struct smc_clc_msg_local;
void smc_lgr_forget(struct smc_link_group *lgr);
-void smc_lgr_terminate(struct smc_link_group *lgr);
+void smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
unsigned short vlan);
+void smc_smcd_terminate_all(struct smcd_dev *dev);
+void smc_smcr_terminate_all(struct smc_ib_device *smcibdev);
int smc_buf_create(struct smc_sock *smc, bool is_smcd);
int smc_uncompress_bufsize(u8 compressed);
int smc_rmb_rtoken_handling(struct smc_connection *conn,
@@ -314,6 +318,7 @@ void smc_conn_free(struct smc_connection *conn);
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini);
void smcd_conn_free(struct smc_connection *conn);
void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr);
+int smc_core_init(void);
void smc_core_exit(void);
static inline struct smc_link_group *smc_get_lgr(struct smc_link *link)
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index af05daeb0538..548632621f4b 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -15,6 +15,7 @@
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
+#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
@@ -520,9 +521,9 @@ static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
if (!smcibdev->initialized)
return;
smcibdev->initialized = 0;
- smc_wr_remove_dev(smcibdev);
ib_destroy_cq(smcibdev->roce_cq_recv);
ib_destroy_cq(smcibdev->roce_cq_send);
+ smc_wr_remove_dev(smcibdev);
}
static struct ib_client smc_ib_client;
@@ -543,7 +544,8 @@ static void smc_ib_add_dev(struct ib_device *ibdev)
smcibdev->ibdev = ibdev;
INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
-
+ atomic_set(&smcibdev->lnk_cnt, 0);
+ init_waitqueue_head(&smcibdev->lnks_deleted);
spin_lock(&smc_ib_devices.lock);
list_add_tail(&smcibdev->list, &smc_ib_devices.list);
spin_unlock(&smc_ib_devices.lock);
@@ -565,7 +567,7 @@ static void smc_ib_add_dev(struct ib_device *ibdev)
schedule_work(&smcibdev->port_event_work);
}
-/* callback function for ib_register_client() */
+/* callback function for ib_unregister_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
struct smc_ib_device *smcibdev;
@@ -575,6 +577,7 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
spin_lock(&smc_ib_devices.lock);
list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
spin_unlock(&smc_ib_devices.lock);
+ smc_smcr_terminate_all(smcibdev);
smc_ib_cleanup_per_ibdev(smcibdev);
ib_unregister_event_handler(&smcibdev->event_handler);
kfree(smcibdev);
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 6a0069db6cae..255db87547d3 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/if_ether.h>
+#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <net/smc.h>
@@ -48,6 +49,8 @@ struct smc_ib_device { /* ib-device infos for smc */
struct work_struct port_event_work;
unsigned long port_event_mask;
DECLARE_BITMAP(ports_going_away, SMC_MAX_PORTS);
+ atomic_t lnk_cnt; /* number of links on ibdev */
+ wait_queue_head_t lnks_deleted; /* wait for removal of all links */
};
struct smc_buf_desc;
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index ee7340898cb4..5c4727d5066e 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -146,6 +146,10 @@ out:
int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
{
struct smcd_dmb dmb;
+ int rc = 0;
+
+ if (!dmb_desc->dma_addr)
+ return rc;
memset(&dmb, 0, sizeof(dmb));
dmb.dmb_tok = dmb_desc->token;
@@ -153,7 +157,13 @@ int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
dmb.cpu_addr = dmb_desc->cpu_addr;
dmb.dma_addr = dmb_desc->dma_addr;
dmb.dmb_len = dmb_desc->len;
- return smcd->ops->unregister_dmb(smcd, &dmb);
+ rc = smcd->ops->unregister_dmb(smcd, &dmb);
+ if (!rc || rc == ISM_ERROR) {
+ dmb_desc->cpu_addr = NULL;
+ dmb_desc->dma_addr = 0;
+ }
+
+ return rc;
}
int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
@@ -226,6 +236,9 @@ int smc_ism_signal_shutdown(struct smc_link_group *lgr)
int rc;
union smcd_sw_event_info ev_info;
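+ /* no need to signal shutdown to a peer that has already shut down */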
+ if (lgr->peer_shutdown)
+ return 0;
+
memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
ev_info.vlan_id = lgr->vlan_id;
ev_info.code = ISM_EVENT_REQUEST;
@@ -289,6 +302,7 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
spin_lock_init(&smcd->lgr_lock);
INIT_LIST_HEAD(&smcd->vlan);
INIT_LIST_HEAD(&smcd->lgr_list);
+ init_waitqueue_head(&smcd->lgrs_deleted);
smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s",
WQ_MEM_RECLAIM, name);
if (!smcd->event_wq) {
@@ -313,12 +327,12 @@ EXPORT_SYMBOL_GPL(smcd_register_dev);
void smcd_unregister_dev(struct smcd_dev *smcd)
{
spin_lock(&smcd_dev_list.lock);
- list_del(&smcd->list);
+ list_del_init(&smcd->list);
spin_unlock(&smcd_dev_list.lock);
smcd->going_away = 1;
+ smc_smcd_terminate_all(smcd);
flush_workqueue(smcd->event_wq);
destroy_workqueue(smcd->event_wq);
- smc_smcd_terminate(smcd, 0, VLAN_VID_MASK);
device_del(&smcd->dev);
}
@@ -372,7 +386,7 @@ void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno)
spin_lock_irqsave(&smcd->lock, flags);
conn = smcd->conn[dmbno];
- if (conn)
+ if (conn && !conn->killed)
tasklet_schedule(&conn->rx_tsklet);
spin_unlock_irqrestore(&smcd->lock, flags);
}
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index e1918ffaf125..a9f6431dd69a 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -614,7 +614,7 @@ static void smc_llc_testlink_work(struct work_struct *work)
rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
SMC_LLC_WAIT_TIME);
if (rc <= 0) {
- smc_lgr_terminate(smc_get_lgr(link));
+ smc_lgr_terminate(smc_get_lgr(link), true);
return;
}
next_interval = link->llc_testlink_time;
@@ -656,6 +656,7 @@ void smc_llc_link_active(struct smc_link *link, int testlink_time)
void smc_llc_link_deleting(struct smc_link *link)
{
link->state = SMC_LNK_DELETING;
+ smc_wr_wakeup_tx_wait(link);
}
/* called in tasklet context */
@@ -663,6 +664,8 @@ void smc_llc_link_inactive(struct smc_link *link)
{
link->state = SMC_LNK_INACTIVE;
cancel_delayed_work(&link->llc_testlink_wrk);
+ smc_wr_wakeup_reg_wait(link);
+ smc_wr_wakeup_tx_wait(link);
}
/* called in worker context */
@@ -695,9 +698,11 @@ int smc_llc_do_confirm_rkey(struct smc_link *link,
int smc_llc_do_delete_rkey(struct smc_link *link,
struct smc_buf_desc *rmb_desc)
{
- int rc;
+ int rc = 0;
mutex_lock(&link->llc_delete_rkey_mutex);
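+ /* deleting an rkey only makes sense on an active link */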
+ if (link->state != SMC_LNK_ACTIVE)
+ goto out;
reinit_completion(&link->llc_delete_rkey);
rc = smc_llc_send_delete_rkey(link, rmb_desc);
if (rc)
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 352ee2fa17dc..82dedf052d86 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -376,8 +376,6 @@ static int smc_pnet_fill_entry(struct net *net,
return 0;
error:
- if (pnetelem->ndev)
- dev_put(pnetelem->ndev);
return rc;
}
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 824f096ee7de..0d42e7716b91 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -284,7 +284,7 @@ static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
if (rc)
- smc_lgr_terminate(lgr);
+ smc_lgr_terminate(lgr, true);
return rc;
}
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 50743dc56c86..337ee52ad3d3 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -50,6 +50,26 @@ struct smc_wr_tx_pend { /* control data for a pending send request */
/*------------------------------- completion --------------------------------*/
+/* returns true if at least one tx work request is pending on the given link */
+static inline bool smc_wr_is_tx_pend(struct smc_link *link)
+{
+ return find_first_bit(link->wr_tx_mask, link->wr_tx_cnt) !=
+ link->wr_tx_cnt;
+}
+
+/* wait till all pending tx work requests on the given link are completed */
+static inline int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
+{
+ if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
+ SMC_WR_TX_WAIT_PENDING_TIME))
+ return 0;
+ else /* timeout */
+ return -EPIPE;
+}
+
static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
{
u32 i;
@@ -75,7 +95,7 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
link->wr_reg_state = FAILED;
else
link->wr_reg_state = CONFIRMED;
- wake_up(&link->wr_reg_wait);
+ smc_wr_wakeup_reg_wait(link);
return;
}
@@ -171,6 +191,7 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
struct smc_rdma_wr **wr_rdma_buf,
struct smc_wr_tx_pend_priv **wr_pend_priv)
{
+ struct smc_link_group *lgr = smc_get_lgr(link);
struct smc_wr_tx_pend *wr_pend;
u32 idx = link->wr_tx_cnt;
struct ib_send_wr *wr_ib;
@@ -179,19 +200,20 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
*wr_buf = NULL;
*wr_pend_priv = NULL;
- if (in_softirq()) {
+ if (in_softirq() || lgr->terminating) {
rc = smc_wr_tx_get_free_slot_index(link, &idx);
if (rc)
return rc;
} else {
- rc = wait_event_timeout(
+ rc = wait_event_interruptible_timeout(
link->wr_tx_wait,
link->state == SMC_LNK_INACTIVE ||
+ lgr->terminating ||
(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
SMC_WR_TX_WAIT_FREE_SLOT_TIME);
if (!rc) {
/* timeout - terminate connections */
- smc_lgr_terminate_sched(smc_get_lgr(link));
+ smc_lgr_terminate_sched(lgr);
return -EPIPE;
}
if (idx == link->wr_tx_cnt)
@@ -227,6 +249,7 @@ int smc_wr_tx_put_slot(struct smc_link *link,
memset(&link->wr_tx_bufs[idx], 0,
sizeof(link->wr_tx_bufs[idx]));
test_and_clear_bit(idx, link->wr_tx_mask);
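+ /* a send slot became free, wake up any waiting sender */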
+ wake_up(&link->wr_tx_wait);
return 1;
}
@@ -510,8 +533,10 @@ void smc_wr_free_link(struct smc_link *lnk)
{
struct ib_device *ibdev;
- memset(lnk->wr_tx_mask, 0,
- BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
+ if (smc_wr_tx_wait_no_pending_sends(lnk))
+ memset(lnk->wr_tx_mask, 0,
+ BITS_TO_LONGS(SMC_WR_BUF_CNT) *
+ sizeof(*lnk->wr_tx_mask));
if (!lnk->smcibdev)
return;
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index 09bf32fd3959..3ac99c898418 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -60,6 +60,16 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
atomic_long_set(wr_tx_id, val);
}
+static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)
+{
+ wake_up_all(&lnk->wr_tx_wait);
+}
+
+static inline void smc_wr_wakeup_reg_wait(struct smc_link *lnk)
+{
+ wake_up(&lnk->wr_reg_wait);
+}
+
/* post a new receive work request to fill a completed old work request entry */
static inline int smc_wr_rx_post(struct smc_link *link)
{
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index b83e16ade4d2..716b61a701a8 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -35,6 +35,21 @@ config TIPC_MEDIA_UDP
Saying Y here will enable support for running TIPC over IP/UDP
bool
default y
+config TIPC_CRYPTO
+ bool "TIPC encryption support"
+ depends on TIPC
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_GCM
+ help
+ Saying Y here will enable support for TIPC encryption.
+ All TIPC messages will be encrypted/decrypted using the AEAD AES-GCM
+ algorithm (as in IPsec or TLS) before leaving/entering the TIPC stack.
+ Key setting from user-space is performed via netlink by a user program
+ (e.g. the iproute2 'tipc' tool).
config TIPC_DIAG
tristate "TIPC: socket monitoring interface"
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index c86aba0282af..11255e970dd4 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -16,6 +16,7 @@ CFLAGS_trace.o += -I$(src)
tipc-$(CONFIG_TIPC_MEDIA_UDP) += udp_media.o
tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
tipc-$(CONFIG_SYSCTL) += sysctl.o
+tipc-$(CONFIG_TIPC_CRYPTO) += crypto.o
obj-$(CONFIG_TIPC_DIAG) += diag.o
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 6ef1abdd525f..f41096a759fa 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -84,7 +84,7 @@ static struct tipc_bc_base *tipc_bc_base(struct net *net)
*/
int tipc_bcast_get_mtu(struct net *net)
{
- return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE;
+ return tipc_link_mss(tipc_bc_sndlink(net));
}
void tipc_bcast_disable_rcast(struct net *net)
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 0214aa1c4427..d7ec26bd739d 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -44,6 +44,7 @@
#include "netlink.h"
#include "udp_media.h"
#include "trace.h"
+#include "crypto.h"
#define MAX_ADDR_STR 60
@@ -315,6 +316,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
b->net_plane = bearer_id + 'A';
b->priority = prio;
test_and_set_bit_lock(0, &b->up);
+ refcount_set(&b->refcnt, 1);
res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
if (res) {
@@ -351,6 +353,17 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
return 0;
}
+bool tipc_bearer_hold(struct tipc_bearer *b)
+{
+ return (b && refcount_inc_not_zero(&b->refcnt));
+}
+
+void tipc_bearer_put(struct tipc_bearer *b)
+{
+ if (b && refcount_dec_and_test(&b->refcnt))
+ kfree_rcu(b, rcu);
+}
+
/**
* bearer_disable
*
@@ -369,7 +382,7 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b)
if (b->disc)
tipc_disc_delete(b->disc);
RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL);
- kfree_rcu(b, rcu);
+ tipc_bearer_put(b);
tipc_mon_delete(net, bearer_id);
}
@@ -504,10 +517,15 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
rcu_read_lock();
b = bearer_get(net, bearer_id);
- if (likely(b && (test_bit(0, &b->up) || msg_is_reset(hdr))))
- b->media->send_msg(net, skb, b, dest);
- else
+ if (likely(b && (test_bit(0, &b->up) || msg_is_reset(hdr)))) {
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_xmit(net, &skb, b, dest, NULL);
+ if (skb)
+#endif
+ b->media->send_msg(net, skb, b, dest);
+ } else {
kfree_skb(skb);
+ }
rcu_read_unlock();
}
@@ -515,7 +533,8 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
*/
void tipc_bearer_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq,
- struct tipc_media_addr *dst)
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode)
{
struct tipc_bearer *b;
struct sk_buff *skb, *tmp;
@@ -529,10 +548,15 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
__skb_queue_purge(xmitq);
skb_queue_walk_safe(xmitq, skb, tmp) {
__skb_dequeue(xmitq);
- if (likely(test_bit(0, &b->up) || msg_is_reset(buf_msg(skb))))
- b->media->send_msg(net, skb, b, dst);
- else
+ if (likely(test_bit(0, &b->up) || msg_is_reset(buf_msg(skb)))) {
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_xmit(net, &skb, b, dst, __dnode);
+ if (skb)
+#endif
+ b->media->send_msg(net, skb, b, dst);
+ } else {
kfree_skb(skb);
+ }
}
rcu_read_unlock();
}
@@ -543,6 +567,7 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq)
{
struct tipc_net *tn = tipc_net(net);
+ struct tipc_media_addr *dst;
int net_id = tn->net_id;
struct tipc_bearer *b;
struct sk_buff *skb, *tmp;
@@ -557,7 +582,12 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
msg_set_non_seq(hdr, 1);
msg_set_mc_netid(hdr, net_id);
__skb_dequeue(xmitq);
- b->media->send_msg(net, skb, b, &b->bcast_addr);
+ dst = &b->bcast_addr;
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_xmit(net, &skb, b, dst, NULL);
+ if (skb)
+#endif
+ b->media->send_msg(net, skb, b, dst);
}
rcu_read_unlock();
}
@@ -584,6 +614,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
if (likely(b && test_bit(0, &b->up) &&
(skb->pkt_type <= PACKET_MULTICAST))) {
skb_mark_not_on_list(skb);
+ TIPC_SKB_CB(skb)->flags = 0;
tipc_rcv(dev_net(b->pt.dev), skb, b);
rcu_read_unlock();
return NET_RX_SUCCESS;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index ea0f3c49cbed..d0c79cc6c0c2 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -165,6 +165,7 @@ struct tipc_bearer {
struct tipc_discoverer *disc;
char net_plane;
unsigned long up;
+ refcount_t refcnt;
};
struct tipc_bearer_names {
@@ -210,6 +211,8 @@ int tipc_media_set_window(const char *name, u32 new_value);
int tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
struct nlattr *attrs[]);
+bool tipc_bearer_hold(struct tipc_bearer *b);
+void tipc_bearer_put(struct tipc_bearer *b);
void tipc_disable_l2_media(struct tipc_bearer *b);
int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
struct tipc_bearer *b, struct tipc_media_addr *dest);
@@ -229,7 +232,8 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
struct tipc_media_addr *dest);
void tipc_bearer_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq,
- struct tipc_media_addr *dst);
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode);
void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq);
void tipc_clone_to_loopback(struct net *net, struct sk_buff_head *pkts);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index ab648dd150ee..7532a00ac73d 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -34,8 +34,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include "core.h"
#include "name_table.h"
#include "subscr.h"
@@ -44,6 +42,7 @@
#include "socket.h"
#include "bcast.h"
#include "node.h"
+#include "crypto.h"
#include <linux/module.h>
@@ -68,6 +67,11 @@ static int __net_init tipc_init_net(struct net *net)
INIT_LIST_HEAD(&tn->node_list);
spin_lock_init(&tn->node_list_lock);
+#ifdef CONFIG_TIPC_CRYPTO
+ err = tipc_crypto_start(&tn->crypto_tx, net, NULL);
+ if (err)
+ goto out_crypto;
+#endif
err = tipc_sk_rht_init(net);
if (err)
goto out_sk_rht;
@@ -93,6 +97,11 @@ out_bclink:
out_nametbl:
tipc_sk_rht_destroy(net);
out_sk_rht:
+
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_stop(&tn->crypto_tx);
+out_crypto:
+#endif
return err;
}
@@ -103,6 +112,9 @@ static void __net_exit tipc_exit_net(struct net *net)
tipc_bcast_stop(net);
tipc_nametbl_stop(net);
tipc_sk_rht_destroy(net);
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_stop(&tipc_net(net)->crypto_tx);
+#endif
}
static void __net_exit tipc_pernet_pre_exit(struct net *net)
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 8776d32a4a47..631d83c9705f 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -61,6 +61,12 @@
#include <net/genetlink.h>
#include <net/netns/hash.h>
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
struct tipc_node;
struct tipc_bearer;
struct tipc_bc_base;
@@ -68,6 +74,9 @@ struct tipc_link;
struct tipc_name_table;
struct tipc_topsrv;
struct tipc_monitor;
+#ifdef CONFIG_TIPC_CRYPTO
+struct tipc_crypto;
+#endif
#define TIPC_MOD_VER "2.0.0"
@@ -129,6 +138,11 @@ struct tipc_net {
/* Tracing of node internal messages */
struct packet_type loopback_pt;
+
+#ifdef CONFIG_TIPC_CRYPTO
+ /* TX crypto handler */
+ struct tipc_crypto *crypto_tx;
+#endif
};
static inline struct tipc_net *tipc_net(struct net *net)
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
new file mode 100644
index 000000000000..990a872cec46
--- /dev/null
+++ b/net/tipc/crypto.c
@@ -0,0 +1,1986 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * net/tipc/crypto.c: TIPC crypto for key handling & packet en/decryption
+ *
+ * Copyright (c) 2019, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include "crypto.h"
+
+#define TIPC_TX_PROBE_LIM msecs_to_jiffies(1000) /* > 1s */
+#define TIPC_TX_LASTING_LIM msecs_to_jiffies(120000) /* 2 mins */
+#define TIPC_RX_ACTIVE_LIM msecs_to_jiffies(3000) /* 3s */
+#define TIPC_RX_PASSIVE_LIM msecs_to_jiffies(180000) /* 3 mins */
+#define TIPC_MAX_TFMS_DEF 10
+#define TIPC_MAX_TFMS_LIM 1000
+
+/**
+ * TIPC Key ids
+ */
+enum {
+ KEY_UNUSED = 0,
+ KEY_MIN,
+ KEY_1 = KEY_MIN,
+ KEY_2,
+ KEY_3,
+ KEY_MAX = KEY_3,
+};
+
+/**
+ * TIPC Crypto statistics
+ */
+enum {
+ STAT_OK,
+ STAT_NOK,
+ STAT_ASYNC,
+ STAT_ASYNC_OK,
+ STAT_ASYNC_NOK,
+ STAT_BADKEYS, /* tx only */
+ STAT_BADMSGS = STAT_BADKEYS, /* rx only */
+ STAT_NOKEYS,
+ STAT_SWITCHES,
+
+ MAX_STATS,
+};
+
+/* Header labels for the TIPC crypto statistics */
+static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok",
+ "async_nok", "badmsgs", "nokeys",
+ "switches"};
+
+/* Maximum number of TFMs per key */
+int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF;
+
+/**
+ * struct tipc_key - TIPC keys' status indicator
+ *
+ * 7 6 5 4 3 2 1 0
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * key: | (reserved)|passive idx| active idx|pending idx|
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ */
+struct tipc_key {
+#define KEY_BITS (2)
+#define KEY_MASK ((1 << KEY_BITS) - 1)
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 pending:2,
+ active:2,
+ passive:2, /* rx only */
+ reserved:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved:2,
+ passive:2, /* rx only */
+ active:2,
+ pending:2;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ } __packed;
+ u8 keys;
+ };
+};
+
+/**
+ * struct tipc_tfm - TIPC TFM structure to form a list of TFMs
+ */
+struct tipc_tfm {
+ struct crypto_aead *tfm;
+ struct list_head list;
+};
+
+/**
+ * struct tipc_aead - TIPC AEAD key structure
+ * @tfm_entry: per-cpu pointer to one entry in TFM list
+ * @crypto: TIPC crypto that owns this key
+ * @cloned: reference to the source key in case of cloning
+ * @users: the number of key users (TX/RX)
+ * @salt: the key's SALT value
+ * @authsize: authentication tag size (max = 16)
+ * @mode: the crypto mode applied to the key
+ * @hint: a hint for the user key
+ * @rcu: struct rcu_head
+ * @seqno: the key seqno (cluster scope)
+ * @refcnt: the key reference counter
+ */
+struct tipc_aead {
+#define TIPC_AEAD_HINT_LEN (5)
+ struct tipc_tfm * __percpu *tfm_entry;
+ struct tipc_crypto *crypto;
+ struct tipc_aead *cloned;
+ atomic_t users;
+ u32 salt;
+ u8 authsize;
+ u8 mode;
+ char hint[TIPC_AEAD_HINT_LEN + 1];
+ struct rcu_head rcu;
+
+ atomic64_t seqno ____cacheline_aligned;
+ refcount_t refcnt ____cacheline_aligned;
+
+} ____cacheline_aligned;
+
+/**
+ * struct tipc_crypto_stats - TIPC Crypto statistics
+ */
+struct tipc_crypto_stats {
+ unsigned int stat[MAX_STATS];
+};
+
+/**
+ * struct tipc_crypto - TIPC TX/RX crypto structure
+ * @net: struct net
+ * @node: TIPC node (RX)
+ * @aead: array of pointers to AEAD keys for encryption/decryption
+ * @peer_rx_active: replicated peer RX active key index
+ * @key: the key states
+ * @working: the crypto is working or not
+ * @stats: the crypto statistics
+ * @sndnxt: the per-peer sndnxt (TX)
+ * @timer1: general timer 1 (jiffies)
+ * @timer2: general timer 1 (jiffies)
+ * @lock: tipc_key lock
+ */
+struct tipc_crypto {
+ struct net *net;
+ struct tipc_node *node;
+ struct tipc_aead __rcu *aead[KEY_MAX + 1]; /* key[0] is UNUSED */
+ atomic_t peer_rx_active;
+ struct tipc_key key;
+ u8 working:1;
+ struct tipc_crypto_stats __percpu *stats;
+
+ atomic64_t sndnxt ____cacheline_aligned;
+ unsigned long timer1;
+ unsigned long timer2;
+ spinlock_t lock; /* crypto lock */
+
+} ____cacheline_aligned;
+
+/* struct tipc_crypto_tx_ctx - TX context for callbacks */
+struct tipc_crypto_tx_ctx {
+ struct tipc_aead *aead;
+ struct tipc_bearer *bearer;
+ struct tipc_media_addr dst;
+};
+
+/* struct tipc_crypto_rx_ctx - RX context for callbacks */
+struct tipc_crypto_rx_ctx {
+ struct tipc_aead *aead;
+ struct tipc_bearer *bearer;
+};
+
+static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead);
+static inline void tipc_aead_put(struct tipc_aead *aead);
+static void tipc_aead_free(struct rcu_head *rp);
+static int tipc_aead_users(struct tipc_aead __rcu *aead);
+static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim);
+static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim);
+static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val);
+static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead);
+static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
+ u8 mode);
+static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src);
+static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
+ unsigned int crypto_ctx_size,
+ u8 **iv, struct aead_request **req,
+ struct scatterlist **sg, int nsg);
+static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode);
+static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err);
+static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
+ struct sk_buff *skb, struct tipc_bearer *b);
+static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err);
+static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr);
+static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
+ u8 tx_key, struct sk_buff *skb,
+ struct tipc_crypto *__rx);
+static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
+ u8 new_passive,
+ u8 new_active,
+ u8 new_pending);
+static int tipc_crypto_key_attach(struct tipc_crypto *c,
+ struct tipc_aead *aead, u8 pos);
+static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
+static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
+ struct tipc_crypto *rx,
+ struct sk_buff *skb);
+static void tipc_crypto_key_synch(struct tipc_crypto *rx, u8 new_rx_active,
+ struct tipc_msg *hdr);
+static int tipc_crypto_key_revoke(struct net *net, u8 tx_key);
+static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
+ struct tipc_bearer *b,
+ struct sk_buff **skb, int err);
+static void tipc_crypto_do_cmd(struct net *net, int cmd);
+static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf);
+#ifdef TIPC_CRYPTO_DEBUG
+static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
+ char *buf);
+#endif
+
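+/* Next key id in cyclic order, e.g. key_next(KEY_1) == KEY_2 and
+ * key_next(KEY_MAX) == KEY_1
+ */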
+#define key_next(cur) ((cur) % KEY_MAX + 1)
+
+#define tipc_aead_rcu_ptr(rcu_ptr, lock) \
+ rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock))
+
+#define tipc_aead_rcu_swap(rcu_ptr, ptr, lock) \
+ rcu_swap_protected((rcu_ptr), (ptr), lockdep_is_held(lock))
+
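+/* Publish a new AEAD key in rcu_ptr and drop the reference to the old one */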
+#define tipc_aead_rcu_replace(rcu_ptr, ptr, lock) \
+do { \
+ typeof(rcu_ptr) __tmp = rcu_dereference_protected((rcu_ptr), \
+ lockdep_is_held(lock)); \
+ rcu_assign_pointer((rcu_ptr), (ptr)); \
+ tipc_aead_put(__tmp); \
+} while (0)
+
+#define tipc_crypto_key_detach(rcu_ptr, lock) \
+ tipc_aead_rcu_replace((rcu_ptr), NULL, lock)
+
+/**
+ * tipc_aead_key_validate - Validate an AEAD user key
+ */
+int tipc_aead_key_validate(struct tipc_aead_key *ukey)
+{
+ int keylen;
+
+ /* Check if algorithm exists */
+ if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
+ pr_info("Not found cipher: \"%s\"!\n", ukey->alg_name);
+ return -ENODEV;
+ }
+
+ /* Currently, we only support the "gcm(aes)" cipher algorithm */
+ if (strcmp(ukey->alg_name, "gcm(aes)"))
+ return -ENOTSUPP;
+
+ /* Check if key size is correct */
+ keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
+ if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 &&
+ keylen != TIPC_AES_GCM_KEY_SIZE_192 &&
+ keylen != TIPC_AES_GCM_KEY_SIZE_256))
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
+{
+ struct tipc_aead *tmp;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt)))
+ tmp = NULL;
+ rcu_read_unlock();
+
+ return tmp;
+}
+
+static inline void tipc_aead_put(struct tipc_aead *aead)
+{
+ if (aead && refcount_dec_and_test(&aead->refcnt))
+ call_rcu(&aead->rcu, tipc_aead_free);
+}
+
+/**
+ * tipc_aead_free - Release AEAD key incl. all the TFMs in the list
+ * @rp: rcu head pointer
+ */
+static void tipc_aead_free(struct rcu_head *rp)
+{
+ struct tipc_aead *aead = container_of(rp, struct tipc_aead, rcu);
+ struct tipc_tfm *tfm_entry, *head, *tmp;
+
+ if (aead->cloned) {
+ tipc_aead_put(aead->cloned);
+ } else {
+ head = *this_cpu_ptr(aead->tfm_entry);
+ list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
+ crypto_free_aead(tfm_entry->tfm);
+ list_del(&tfm_entry->list);
+ kfree(tfm_entry);
+ }
+ /* Free the head */
+ crypto_free_aead(head->tfm);
+ list_del(&head->list);
+ kfree(head);
+ }
+ free_percpu(aead->tfm_entry);
+ kfree(aead);
+}
+
+static int tipc_aead_users(struct tipc_aead __rcu *aead)
+{
+ struct tipc_aead *tmp;
+ int users = 0;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp)
+ users = atomic_read(&tmp->users);
+ rcu_read_unlock();
+
+ return users;
+}
+
+static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim)
+{
+ struct tipc_aead *tmp;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp)
+ atomic_add_unless(&tmp->users, 1, lim);
+ rcu_read_unlock();
+}
+
+static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim)
+{
+ struct tipc_aead *tmp;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp)
+ atomic_add_unless(&tmp->users, -1, lim);
+ rcu_read_unlock();
+}
+
+static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
+{
+ struct tipc_aead *tmp;
+ int cur;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp) {
+ do {
+ cur = atomic_read(&tmp->users);
+ if (cur == val)
+ break;
+ } while (atomic_cmpxchg(&tmp->users, cur, val) != cur);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * tipc_aead_tfm_next - Move TFM entry to the next one in list and return it
+ */
+static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
+{
+ struct tipc_tfm **tfm_entry = this_cpu_ptr(aead->tfm_entry);
+
+ *tfm_entry = list_next_entry(*tfm_entry, list);
+ return (*tfm_entry)->tfm;
+}
+
+/**
+ * tipc_aead_init - Initiate TIPC AEAD
+ * @aead: returned new TIPC AEAD key handle pointer
+ * @ukey: pointer to user key data
+ * @mode: the key mode
+ *
+ * Allocate a list of new cipher transformations (TFMs) with the given user
+ * key data if it is valid. The number of TFMs to allocate can be set
+ * beforehand via the sysctl "net/tipc/max_tfms".
+ * All the other AEAD data are initialized as well.
+ *
+ * Return: 0 if the initiation is successful, otherwise: < 0
+ */
+static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
+ u8 mode)
+{
+ struct tipc_tfm *tfm_entry, *head;
+ struct crypto_aead *tfm;
+ struct tipc_aead *tmp;
+ int keylen, err, cpu;
+ int tfm_cnt = 0;
+
+ if (unlikely(*aead))
+ return -EEXIST;
+
+ /* Allocate a new AEAD */
+ tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
+ if (unlikely(!tmp))
+ return -ENOMEM;
+
+ /* The key consists of two parts: [AES-KEY][SALT] */
+ keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
+
+ /* Allocate per-cpu TFM entry pointer */
+ tmp->tfm_entry = alloc_percpu(struct tipc_tfm *);
+ if (!tmp->tfm_entry) {
+ kzfree(tmp);
+ return -ENOMEM;
+ }
+
+ /* Make a list of TFMs with the user key data */
+ do {
+ tfm = crypto_alloc_aead(ukey->alg_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ err = PTR_ERR(tfm);
+ break;
+ }
+
+ if (unlikely(!tfm_cnt &&
+ crypto_aead_ivsize(tfm) != TIPC_AES_GCM_IV_SIZE)) {
+ crypto_free_aead(tfm);
+ err = -ENOTSUPP;
+ break;
+ }
+
+ err = crypto_aead_setauthsize(tfm, TIPC_AES_GCM_TAG_SIZE);
+ err |= crypto_aead_setkey(tfm, ukey->key, keylen);
+ if (unlikely(err)) {
+ crypto_free_aead(tfm);
+ break;
+ }
+
+ tfm_entry = kmalloc(sizeof(*tfm_entry), GFP_KERNEL);
+ if (unlikely(!tfm_entry)) {
+ crypto_free_aead(tfm);
+ err = -ENOMEM;
+ break;
+ }
+ INIT_LIST_HEAD(&tfm_entry->list);
+ tfm_entry->tfm = tfm;
+
+ /* First entry? */
+ if (!tfm_cnt) {
+ head = tfm_entry;
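+ /* every CPU starts at the list head and round-robins
+ * through the list via tipc_aead_tfm_next()
+ */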
+ for_each_possible_cpu(cpu) {
+ *per_cpu_ptr(tmp->tfm_entry, cpu) = head;
+ }
+ } else {
+ list_add_tail(&tfm_entry->list, &head->list);
+ }
+
+ } while (++tfm_cnt < sysctl_tipc_max_tfms);
+
+ /* Not any TFM is allocated? */
+ if (!tfm_cnt) {
+ free_percpu(tmp->tfm_entry);
+ kzfree(tmp);
+ return err;
+ }
+
+ /* Copy some chars from the user key as a hint */
+ memcpy(tmp->hint, ukey->key, TIPC_AEAD_HINT_LEN);
+ tmp->hint[TIPC_AEAD_HINT_LEN] = '\0';
+
+ /* Initialize the other data */
+ tmp->mode = mode;
+ tmp->cloned = NULL;
+ tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
+ memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
+ atomic_set(&tmp->users, 0);
+ atomic64_set(&tmp->seqno, 0);
+ refcount_set(&tmp->refcnt, 1);
+
+ *aead = tmp;
+ return 0;
+}
+
+/**
+ * tipc_aead_clone - Clone a TIPC AEAD key
+ * @dst: dest key for the cloning
+ * @src: source key to clone from
+ *
+ * Make a "copy" of the source AEAD key data to the dest, the TFMs list is
+ * common for the keys.
+ * A reference to the source is hold in the "cloned" pointer for the later
+ * freeing purposes.
+ *
+ * Note: this must be done in cluster-key mode only!
+ * Return: 0 in case of success, otherwise < 0
+ */
+static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src)
+{
+ struct tipc_aead *aead;
+ int cpu;
+
+ if (!src)
+ return -ENOKEY;
+
+ if (src->mode != CLUSTER_KEY)
+ return -EINVAL;
+
+ if (unlikely(*dst))
+ return -EEXIST;
+
+ aead = kzalloc(sizeof(*aead), GFP_ATOMIC);
+ if (unlikely(!aead))
+ return -ENOMEM;
+
+ aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC);
+ if (unlikely(!aead->tfm_entry)) {
+ kzfree(aead);
+ return -ENOMEM;
+ }
+
+ for_each_possible_cpu(cpu) {
+ *per_cpu_ptr(aead->tfm_entry, cpu) =
+ *per_cpu_ptr(src->tfm_entry, cpu);
+ }
+
+ memcpy(aead->hint, src->hint, sizeof(src->hint));
+ aead->mode = src->mode;
+ aead->salt = src->salt;
+ aead->authsize = src->authsize;
+ atomic_set(&aead->users, 0);
+ atomic64_set(&aead->seqno, 0);
+ refcount_set(&aead->refcnt, 1);
+
+ WARN_ON(!refcount_inc_not_zero(&src->refcnt));
+ aead->cloned = src;
+
+ *dst = aead;
+ return 0;
+}
+
+/**
+ * tipc_aead_mem_alloc - Allocate memory for AEAD request operations
+ * @tfm: cipher handle to be registered with the request
+ * @crypto_ctx_size: size of crypto context for callback
+ * @iv: returned pointer to IV data
+ * @req: returned pointer to AEAD request data
+ * @sg: returned pointer to SG lists
+ * @nsg: number of SG lists to be allocated
+ *
+ * Allocate memory to store the crypto context data, AEAD request, IV and SG
+ * lists, the memory layout is as follows:
+ * crypto_ctx || iv || aead_req || sg[]
+ *
+ * Return: the pointer to the memory areas in case of success, otherwise NULL
+ */
+static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
+ unsigned int crypto_ctx_size,
+ u8 **iv, struct aead_request **req,
+ struct scatterlist **sg, int nsg)
+{
+ unsigned int iv_size, req_size;
+ unsigned int len;
+ u8 *mem;
+
+ iv_size = crypto_aead_ivsize(tfm);
+ req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
+
+ len = crypto_ctx_size;
+ len += iv_size;
+ len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
+ len = ALIGN(len, crypto_tfm_ctx_alignment());
+ len += req_size;
+ len = ALIGN(len, __alignof__(struct scatterlist));
+ len += nsg * sizeof(**sg);
+
+ mem = kmalloc(len, GFP_ATOMIC);
+ if (!mem)
+ return NULL;
+
+ *iv = (u8 *)PTR_ALIGN(mem + crypto_ctx_size,
+ crypto_aead_alignmask(tfm) + 1);
+ *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
+ crypto_tfm_ctx_alignment());
+ *sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
+ __alignof__(struct scatterlist));
+
+ return (void *)mem;
+}
+
+/**
+ * tipc_aead_encrypt - Encrypt a message
+ * @aead: TIPC AEAD key for the message encryption
+ * @skb: the input/output skb
+ * @b: TIPC bearer where the message will be delivered after the encryption
+ * @dst: the destination media address
+ * @__dnode: TIPC dest node if "known"
+ *
+ * Return:
+ * 0 : if the encryption has completed
+ * -EINPROGRESS/-EBUSY : if a callback will be performed
+ * < 0 : the encryption has failed
+ */
+static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode)
+{
+ struct crypto_aead *tfm = tipc_aead_tfm_next(aead);
+ struct tipc_crypto_tx_ctx *tx_ctx;
+ struct aead_request *req;
+ struct sk_buff *trailer;
+ struct scatterlist *sg;
+ struct tipc_ehdr *ehdr;
+ int ehsz, len, tailen, nsg, rc;
+ void *ctx;
+ u32 salt;
+ u8 *iv;
+
+ /* Make sure message len at least 4-byte aligned */
+ len = ALIGN(skb->len, 4);
+ tailen = len - skb->len + aead->authsize;
+
+ /* Expand skb tail for authentication tag:
+ * For simplicity, we have made sure the skb has enough tailroom for
+ * the authentication tag at skb allocation. Even when the skb is
+ * nonlinear but has no frag_list, it should still be fine!
+ * Otherwise, we must cow it to be a writable buffer with the tailroom.
+ */
+#ifdef TIPC_CRYPTO_DEBUG
+ SKB_LINEAR_ASSERT(skb);
+ if (tailen > skb_tailroom(skb)) {
+ pr_warn("TX: skb tailroom is not enough: %d, requires: %d\n",
+ skb_tailroom(skb), tailen);
+ }
+#endif
+
+ if (unlikely(!skb_cloned(skb) && tailen <= skb_tailroom(skb))) {
+ nsg = 1;
+ trailer = skb;
+ } else {
+ /* TODO: We could avoid skb_cow_data() if skb has no frag_list
+ * e.g. by skb_fill_page_desc() to add another page to the skb
+ * with the wanted tailen... However, page skbs are not common, so
+ * take it easy for now!
+ * For cloned skbs, e.g. from link_xmit(), there seems to be no choice :(
+ */
+ nsg = skb_cow_data(skb, tailen, &trailer);
+ if (unlikely(nsg < 0)) {
+ pr_err("TX: skb_cow_data() returned %d\n", nsg);
+ return nsg;
+ }
+ }
+
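+ /* extend the skb tail: room for padding and the authentication tag */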
+ pskb_put(skb, trailer, tailen);
+
+ /* Allocate memory for the AEAD operation */
+ ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg);
+ if (unlikely(!ctx))
+ return -ENOMEM;
+ TIPC_SKB_CB(skb)->crypto_ctx = ctx;
+
+ /* Map skb to the sg lists */
+ sg_init_table(sg, nsg);
+ rc = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(rc < 0)) {
+ pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg);
+ goto exit;
+ }
+
+ /* Prepare IV: [SALT (4 octets)][SEQNO (8 octets)]
+ * In case we're in cluster-key mode, SALT is varied by xor-ing with
+ * the source address (or w0 of id), otherwise with the dest address
+ * if dest is known.
+ */
+ ehdr = (struct tipc_ehdr *)skb->data;
+ salt = aead->salt;
+ if (aead->mode == CLUSTER_KEY)
+ salt ^= ehdr->addr; /* __be32 */
+ else if (__dnode)
+ salt ^= tipc_node_get_addr(__dnode);
+ memcpy(iv, &salt, 4);
+ memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);
+
+ /* Prepare request */
+ ehsz = tipc_ehdr_size(ehdr);
+ aead_request_set_tfm(req, tfm);
+ aead_request_set_ad(req, ehsz);
+ aead_request_set_crypt(req, sg, sg, len - ehsz, iv);
+
+ /* Set callback function & data */
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tipc_aead_encrypt_done, skb);
+ tx_ctx = (struct tipc_crypto_tx_ctx *)ctx;
+ tx_ctx->aead = aead;
+ tx_ctx->bearer = b;
+ memcpy(&tx_ctx->dst, dst, sizeof(*dst));
+
+ /* Hold bearer */
+ if (unlikely(!tipc_bearer_hold(b))) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ /* Now, do encrypt */
+ rc = crypto_aead_encrypt(req);
+ if (rc == -EINPROGRESS || rc == -EBUSY)
+ return rc;
+
+ tipc_bearer_put(b);
+
+exit:
+ kfree(ctx);
+ TIPC_SKB_CB(skb)->crypto_ctx = NULL;
+ return rc;
+}
+
+static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err)
+{
+ struct sk_buff *skb = base->data;
+ struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
+ struct tipc_bearer *b = tx_ctx->bearer;
+ struct tipc_aead *aead = tx_ctx->aead;
+ struct tipc_crypto *tx = aead->crypto;
+ struct net *net = tx->net;
+
+ switch (err) {
+ case 0:
+ this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]);
+ if (likely(test_bit(0, &b->up)))
+ b->media->send_msg(net, skb, b, &tx_ctx->dst);
+ else
+ kfree_skb(skb);
+ break;
+ case -EINPROGRESS:
+ return;
+ default:
+ this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]);
+ kfree_skb(skb);
+ break;
+ }
+
+ kfree(tx_ctx);
+ tipc_bearer_put(b);
+ tipc_aead_put(aead);
+}
+
+/**
+ * tipc_aead_decrypt - Decrypt an encrypted message
+ * @net: struct net
+ * @aead: TIPC AEAD for the message decryption
+ * @skb: the input/output skb
+ * @b: TIPC bearer where the message has been received
+ *
+ * Return:
+ * 0 : if the decryption has completed
+ * -EINPROGRESS/-EBUSY : if a callback will be performed
+ * < 0 : the decryption has failed
+ */
+static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
+ struct sk_buff *skb, struct tipc_bearer *b)
+{
+ struct tipc_crypto_rx_ctx *rx_ctx;
+ struct aead_request *req;
+ struct crypto_aead *tfm;
+ struct sk_buff *unused;
+ struct scatterlist *sg;
+ struct tipc_ehdr *ehdr;
+ int ehsz, nsg, rc;
+ void *ctx;
+ u32 salt;
+ u8 *iv;
+
+ if (unlikely(!aead))
+ return -ENOKEY;
+
+ /* Cow skb data if needed */
+ if (likely(!skb_cloned(skb) &&
+ (!skb_is_nonlinear(skb) || !skb_has_frag_list(skb)))) {
+ nsg = 1 + skb_shinfo(skb)->nr_frags;
+ } else {
+ nsg = skb_cow_data(skb, 0, &unused);
+ if (unlikely(nsg < 0)) {
+ pr_err("RX: skb_cow_data() returned %d\n", nsg);
+ return nsg;
+ }
+ }
+
+ /* Allocate memory for the AEAD operation */
+ tfm = tipc_aead_tfm_next(aead);
+ ctx = tipc_aead_mem_alloc(tfm, sizeof(*rx_ctx), &iv, &req, &sg, nsg);
+ if (unlikely(!ctx))
+ return -ENOMEM;
+ TIPC_SKB_CB(skb)->crypto_ctx = ctx;
+
+ /* Map skb to the sg lists */
+ sg_init_table(sg, nsg);
+ rc = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(rc < 0)) {
+ pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg);
+ goto exit;
+ }
+
+ /* Reconstruct IV: [SALT (4 octets)][SEQNO (8 octets)], as on the TX side */
+ ehdr = (struct tipc_ehdr *)skb->data;
+ salt = aead->salt;
+ if (aead->mode == CLUSTER_KEY)
+ salt ^= ehdr->addr; /* __be32 */
+ else if (ehdr->destined)
+ salt ^= tipc_own_addr(net);
+ memcpy(iv, &salt, 4);
+ memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);
+
+ /* Prepare request */
+ ehsz = tipc_ehdr_size(ehdr);
+ aead_request_set_tfm(req, tfm);
+ aead_request_set_ad(req, ehsz);
+ aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv);
+
+ /* Set callback function & data */
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tipc_aead_decrypt_done, skb);
+ rx_ctx = (struct tipc_crypto_rx_ctx *)ctx;
+ rx_ctx->aead = aead;
+ rx_ctx->bearer = b;
+
+ /* Hold bearer */
+ if (unlikely(!tipc_bearer_hold(b))) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ /* Now, do decrypt */
+ rc = crypto_aead_decrypt(req);
+ if (rc == -EINPROGRESS || rc == -EBUSY)
+ return rc;
+
+ tipc_bearer_put(b);
+
+exit:
+ kfree(ctx);
+ TIPC_SKB_CB(skb)->crypto_ctx = NULL;
+ return rc;
+}
+
+static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err)
+{
+ struct sk_buff *skb = base->data;
+ struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
+ struct tipc_bearer *b = rx_ctx->bearer;
+ struct tipc_aead *aead = rx_ctx->aead;
+ struct tipc_crypto_stats __percpu *stats = aead->crypto->stats;
+ struct net *net = aead->crypto->net;
+
+ switch (err) {
+ case 0:
+ this_cpu_inc(stats->stat[STAT_ASYNC_OK]);
+ break;
+ case -EINPROGRESS:
+ return;
+ default:
+ this_cpu_inc(stats->stat[STAT_ASYNC_NOK]);
+ break;
+ }
+
+ kfree(rx_ctx);
+ tipc_crypto_rcv_complete(net, aead, b, &skb, err);
+ if (likely(skb)) {
+ if (likely(test_bit(0, &b->up)))
+ tipc_rcv(net, skb, b);
+ else
+ kfree_skb(skb);
+ }
+
+ tipc_bearer_put(b);
+}
+
+static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr)
+{
+ return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
+}
+
+/**
+ * tipc_ehdr_validate - Validate an encryption message
+ * @skb: the message buffer
+ *
+ * Returns "true" if this is a valid encryption message, otherwise "false"
+ */
+bool tipc_ehdr_validate(struct sk_buff *skb)
+{
+ struct tipc_ehdr *ehdr;
+ int ehsz;
+
+ if (unlikely(!pskb_may_pull(skb, EHDR_MIN_SIZE)))
+ return false;
+
+ ehdr = (struct tipc_ehdr *)skb->data;
+ if (unlikely(ehdr->version != TIPC_EVERSION))
+ return false;
+ ehsz = tipc_ehdr_size(ehdr);
+ if (unlikely(!pskb_may_pull(skb, ehsz)))
+ return false;
+ if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE))
+ return false;
+ if (unlikely(!ehdr->tx_key))
+ return false;
+
+ return true;
+}
+
+/**
+ * tipc_ehdr_build - Build TIPC encryption message header
+ * @net: struct net
+ * @aead: TX AEAD key to be used for the message encryption
+ * @tx_key: key id used for the message encryption
+ * @skb: input/output message skb
+ * @__rx: RX crypto handle if dest is "known"
+ *
+ * Return: the header size if the building is successful, otherwise < 0
+ */
+static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
+ u8 tx_key, struct sk_buff *skb,
+ struct tipc_crypto *__rx)
+{
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct tipc_ehdr *ehdr;
+ u32 user = msg_user(hdr);
+ u64 seqno;
+ int ehsz;
+
+ /* Make room for encryption header */
+ ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
+ WARN_ON(skb_headroom(skb) < ehsz);
+ ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz);
+
+ /* Obtain a seqno first:
+ * Use the key seqno (i.e. cluster-wide) if the dest is unknown or we
+ * are in cluster-key mode; otherwise a per-peer seqno is preferable!
+ */
+ if (!__rx || aead->mode == CLUSTER_KEY)
+ seqno = atomic64_inc_return(&aead->seqno);
+ else
+ seqno = atomic64_inc_return(&__rx->sndnxt);
+
+ /* Revoke the key if seqno is wrapped around */
+ if (unlikely(!seqno))
+ return tipc_crypto_key_revoke(net, tx_key);
+
+ /* Word 1-2 */
+ ehdr->seqno = cpu_to_be64(seqno);
+
+ /* Words 0, 3- */
+ ehdr->version = TIPC_EVERSION;
+ ehdr->user = 0;
+ ehdr->keepalive = 0;
+ ehdr->tx_key = tx_key;
+ ehdr->destined = (__rx) ? 1 : 0;
+ ehdr->rx_key_active = (__rx) ? __rx->key.active : 0;
+ ehdr->reserved_1 = 0;
+ ehdr->reserved_2 = 0;
+
+ switch (user) {
+ case LINK_CONFIG:
+ ehdr->user = LINK_CONFIG;
+ memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN);
+ break;
+ default:
+ if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) {
+ ehdr->user = LINK_PROTOCOL;
+ ehdr->keepalive = msg_is_keepalive(hdr);
+ }
+ ehdr->addr = hdr->hdr[3];
+ break;
+ }
+
+ return ehsz;
+}
+
+static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
+ u8 new_passive,
+ u8 new_active,
+ u8 new_pending)
+{
+#ifdef TIPC_CRYPTO_DEBUG
+ struct tipc_key old = c->key;
+ char buf[32];
+#endif
+
+ c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
+ ((new_active & KEY_MASK) << (KEY_BITS)) |
+ ((new_pending & KEY_MASK));
+
+#ifdef TIPC_CRYPTO_DEBUG
+ pr_info("%s(%s): key changing %s ::%pS\n",
+ (c->node) ? "RX" : "TX",
+ (c->node) ? tipc_node_get_id_str(c->node) :
+ tipc_own_id_string(c->net),
+ tipc_key_change_dump(old, c->key, buf),
+ __builtin_return_address(0));
+#endif
+}
+
+/**
+ * tipc_crypto_key_init - Initiate a new user / AEAD key
+ * @c: TIPC crypto to which new key is attached
+ * @ukey: the user key
+ * @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY)
+ *
+ * A new TIPC AEAD key will be allocated and initiated with the specified user
+ * key, then attached to the TIPC crypto.
+ *
+ * Return: new key id in case of success, otherwise: < 0
+ */
+int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
+ u8 mode)
+{
+ struct tipc_aead *aead = NULL;
+ int rc = 0;
+
+ /* Initiate with the new user key */
+ rc = tipc_aead_init(&aead, ukey, mode);
+
+ /* Attach it to the crypto */
+ if (likely(!rc)) {
+ rc = tipc_crypto_key_attach(c, aead, 0);
+ if (rc < 0)
+ tipc_aead_free(&aead->rcu);
+ }
+
+ pr_info("%s(%s): key initiating, rc %d!\n",
+ (c->node) ? "RX" : "TX",
+ (c->node) ? tipc_node_get_id_str(c->node) :
+ tipc_own_id_string(c->net),
+ rc);
+
+ return rc;
+}
+
+/**
+ * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto
+ * @c: TIPC crypto to which the new AEAD key is attached
+ * @aead: the new AEAD key pointer
+ * @pos: desired slot in the crypto key array, = 0 if any!
+ *
+ * Return: new key id in case of success, otherwise: -EBUSY
+ */
+static int tipc_crypto_key_attach(struct tipc_crypto *c,
+ struct tipc_aead *aead, u8 pos)
+{
+ u8 new_pending, new_passive, new_key;
+ struct tipc_key key;
+ int rc = -EBUSY;
+
+ spin_lock_bh(&c->lock);
+ key = c->key;
+ if (key.active && key.passive)
+ goto exit;
+ if (key.passive && !tipc_aead_users(c->aead[key.passive]))
+ goto exit;
+ if (key.pending) {
+ if (pos)
+ goto exit;
+ if (tipc_aead_users(c->aead[key.pending]) > 0)
+ goto exit;
+ /* Replace it */
+ new_pending = key.pending;
+ new_passive = key.passive;
+ new_key = new_pending;
+ } else {
+ if (pos) {
+ if (key.active && pos != key_next(key.active)) {
+ new_pending = key.pending;
+ new_passive = pos;
+ new_key = new_passive;
+ goto attach;
+ } else if (!key.active && !key.passive) {
+ new_pending = pos;
+ new_passive = key.passive;
+ new_key = new_pending;
+ goto attach;
+ }
+ }
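+ /* default: occupy the slot following the current active (or passive) key */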
+ new_pending = key_next(key.active ?: key.passive);
+ new_passive = key.passive;
+ new_key = new_pending;
+ }
+
+attach:
+ aead->crypto = c;
+ tipc_crypto_key_set_state(c, new_passive, key.active, new_pending);
+ tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
+
+ c->working = 1;
+ c->timer1 = jiffies;
+ c->timer2 = jiffies;
+ rc = new_key;
+
+exit:
+ spin_unlock_bh(&c->lock);
+ return rc;
+}
+
+void tipc_crypto_key_flush(struct tipc_crypto *c)
+{
+ int k;
+
+ spin_lock_bh(&c->lock);
+ c->working = 0;
+ tipc_crypto_key_set_state(c, 0, 0, 0);
+ for (k = KEY_MIN; k <= KEY_MAX; k++)
+ tipc_crypto_key_detach(c->aead[k], &c->lock);
+ atomic_set(&c->peer_rx_active, 0);
+ atomic64_set(&c->sndnxt, 0);
+ spin_unlock_bh(&c->lock);
+}
+
+/**
+ * tipc_crypto_key_try_align - Align RX keys if possible
+ * @rx: RX crypto handle
+ * @new_pending: new pending slot if aligned (= TX key from peer)
+ *
+ * The peer has used an unknown key slot; this only happens when the peer has
+ * left and rejoined, or we are a newcomer.
+ * That means there must be no active key, but a pending key at an unaligned
+ * slot.
+ * If so, we try to move the pending key to the new slot.
+ * Note: A potential passive key can exist, it will be shifted correspondingly!
+ *
+ * Return: "true" if key is successfully aligned, otherwise "false"
+ */
+static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
+{
+ struct tipc_aead *tmp1, *tmp2 = NULL;
+ struct tipc_key key;
+ bool aligned = false;
+ u8 new_passive = 0;
+ int x;
+
+ spin_lock(&rx->lock);
+ key = rx->key;
+ if (key.pending == new_pending) {
+ aligned = true;
+ goto exit;
+ }
+ if (key.active)
+ goto exit;
+ if (!key.pending)
+ goto exit;
+ if (tipc_aead_users(rx->aead[key.pending]) > 0)
+ goto exit;
+
+ /* Try to "isolate" this pending key first */
+ tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock);
+ if (!refcount_dec_if_one(&tmp1->refcnt))
+ goto exit;
+ rcu_assign_pointer(rx->aead[key.pending], NULL);
+
+ /* Move passive key if any */
+ if (key.passive) {
+ tipc_aead_rcu_swap(rx->aead[key.passive], tmp2, &rx->lock);
+ x = (key.passive - key.pending + new_pending) % KEY_MAX;
+ new_passive = (x <= 0) ? x + KEY_MAX : x;
+ }
+
+ /* Re-allocate the key(s) */
+ tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
+ rcu_assign_pointer(rx->aead[new_pending], tmp1);
+ if (new_passive)
+ rcu_assign_pointer(rx->aead[new_passive], tmp2);
+ refcount_set(&tmp1->refcnt, 1);
+ aligned = true;
+ pr_info("RX(%s): key is aligned!\n", tipc_node_get_id_str(rx->node));
+
+exit:
+ spin_unlock(&rx->lock);
+ return aligned;
+}
+
+/**
+ * tipc_crypto_key_pick_tx - Pick one TX key for message decryption
+ * @tx: TX crypto handle
+ * @rx: RX crypto handle (can be NULL)
+ * @skb: the message skb which will be decrypted later
+ *
+ * This function looks up the existing TX keys and picks one which is suitable
+ * for the message decryption; it must be a cluster key and must not have been
+ * used before on the same message (i.e. recursively).
+ *
+ * Return: the TX AEAD key handle in case of success, otherwise NULL
+ */
+static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
+ struct tipc_crypto *rx,
+ struct sk_buff *skb)
+{
+ struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb);
+ struct tipc_aead *aead = NULL;
+ struct tipc_key key = tx->key;
+ u8 k, i = 0;
+
+ /* Initialize data if not yet */
+ if (!skb_cb->tx_clone_deferred) {
+ skb_cb->tx_clone_deferred = 1;
+ memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
+ }
+
+ skb_cb->tx_clone_ctx.rx = rx;
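+ /* give up after a few rounds to avoid an endless recursion */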
+ if (++skb_cb->tx_clone_ctx.recurs > 2)
+ return NULL;
+
+ /* Pick one TX key */
+ spin_lock(&tx->lock);
+ do {
+ k = (i == 0) ? key.pending :
+ ((i == 1) ? key.active : key.passive);
+ if (!k)
+ continue;
+ aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock);
+ if (!aead)
+ continue;
+ if (aead->mode != CLUSTER_KEY ||
+ aead == skb_cb->tx_clone_ctx.last) {
+ aead = NULL;
+ continue;
+ }
+ /* Ok, found one cluster key */
+ skb_cb->tx_clone_ctx.last = aead;
+ WARN_ON(skb->next);
+ skb->next = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb->next))
+ pr_warn("Failed to clone skb for next round if any\n");
+ WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
+ break;
+ } while (++i < 3);
+ spin_unlock(&tx->lock);
+
+ return aead;
+}
+
+/**
+ * tipc_crypto_key_synch - Synch own key data according to peer key status
+ * @rx: RX crypto handle
+ * @new_rx_active: latest RX active key from peer
+ * @hdr: TIPCv2 message
+ *
+ * This function updates the peer node related data as the peer RX active key
+ * has changed, so the user counts of the TX keys on this node are increased
+ * and decreased correspondingly.
+ *
+ * The "per-peer" sndnxt is also reset when the peer key has switched.
+ */
+static void tipc_crypto_key_synch(struct tipc_crypto *rx, u8 new_rx_active,
+ struct tipc_msg *hdr)
+{
+ struct net *net = rx->net;
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ u8 cur_rx_active;
+
+ /* TX might not even be ready yet */
+ if (unlikely(!tx->key.active && !tx->key.pending))
+ return;
+
+ cur_rx_active = atomic_read(&rx->peer_rx_active);
+ if (likely(cur_rx_active == new_rx_active))
+ return;
+
+ /* Make sure this message destined for this node */
+ if (unlikely(msg_short(hdr) ||
+ msg_destnode(hdr) != tipc_own_addr(net)))
+ return;
+
+ /* Peer RX active key has changed, try to update our own & the TX key users */
+ if (atomic_cmpxchg(&rx->peer_rx_active,
+ cur_rx_active,
+ new_rx_active) == cur_rx_active) {
+ if (new_rx_active)
+ tipc_aead_users_inc(tx->aead[new_rx_active], INT_MAX);
+ if (cur_rx_active)
+ tipc_aead_users_dec(tx->aead[cur_rx_active], 0);
+
+ atomic64_set(&rx->sndnxt, 0);
+ /* Mark the point TX key users changed */
+ tx->timer1 = jiffies;
+
+#ifdef TIPC_CRYPTO_DEBUG
+ pr_info("TX(%s): key users changed %d-- %d++, peer RX(%s)\n",
+ tipc_own_id_string(net), cur_rx_active,
+ new_rx_active, tipc_node_get_id_str(rx->node));
+#endif
+ }
+}
+
+static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
+{
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ struct tipc_key key;
+
+ spin_lock(&tx->lock);
+ key = tx->key;
+ WARN_ON(!key.active || tx_key != key.active);
+
+ /* Free the active key */
+ tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
+ tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
+ spin_unlock(&tx->lock);
+
+ pr_warn("TX(%s): key is revoked!\n", tipc_own_id_string(net));
+ return -EKEYREVOKED;
+}
+
+int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
+ struct tipc_node *node)
+{
+ struct tipc_crypto *c;
+
+ if (*crypto)
+ return -EEXIST;
+
+ /* Allocate crypto */
+ c = kzalloc(sizeof(*c), GFP_ATOMIC);
+ if (!c)
+ return -ENOMEM;
+
+ /* Allocate statistic structure */
+ c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
+ if (!c->stats) {
+ kzfree(c);
+ return -ENOMEM;
+ }
+
+ c->working = 0;
+ c->net = net;
+ c->node = node;
+ tipc_crypto_key_set_state(c, 0, 0, 0);
+ atomic_set(&c->peer_rx_active, 0);
+ atomic64_set(&c->sndnxt, 0);
+ c->timer1 = jiffies;
+ c->timer2 = jiffies;
+ spin_lock_init(&c->lock);
+ *crypto = c;
+
+ return 0;
+}
+
+void tipc_crypto_stop(struct tipc_crypto **crypto)
+{
+ struct tipc_crypto *c, *tx, *rx;
+ bool is_rx;
+ u8 k;
+
+ if (!*crypto)
+ return;
+
+ rcu_read_lock();
+ /* RX stopping? => decrease TX key users if any */
+ is_rx = !!((*crypto)->node);
+ if (is_rx) {
+ rx = *crypto;
+ tx = tipc_net(rx->net)->crypto_tx;
+ k = atomic_read(&rx->peer_rx_active);
+ if (k) {
+ tipc_aead_users_dec(tx->aead[k], 0);
+ /* Mark the point TX key users changed */
+ tx->timer1 = jiffies;
+ }
+ }
+
+ /* Release AEAD keys */
+ c = *crypto;
+ for (k = KEY_MIN; k <= KEY_MAX; k++)
+ tipc_aead_put(rcu_dereference(c->aead[k]));
+ rcu_read_unlock();
+
+ pr_warn("%s(%s) has been purged, node left!\n",
+ (is_rx) ? "RX" : "TX",
+ (is_rx) ? tipc_node_get_id_str((*crypto)->node) :
+ tipc_own_id_string((*crypto)->net));
+
+ /* Free this crypto statistics */
+ free_percpu(c->stats);
+
+ *crypto = NULL;
+ kzfree(c);
+}
+
+void tipc_crypto_timeout(struct tipc_crypto *rx)
+{
+ struct tipc_net *tn = tipc_net(rx->net);
+ struct tipc_crypto *tx = tn->crypto_tx;
+ struct tipc_key key;
+ u8 new_pending, new_passive;
+ int cmd;
+
+ /* TX key activating:
+ * The pending key (users > 0) -> active
+ * The active key if any (users == 0) -> free
+ */
+ spin_lock(&tx->lock);
+ key = tx->key;
+ if (key.active && tipc_aead_users(tx->aead[key.active]) > 0)
+ goto s1;
+ if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0)
+ goto s1;
+ if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_LIM))
+ goto s1;
+
+ tipc_crypto_key_set_state(tx, key.passive, key.pending, 0);
+ if (key.active)
+ tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
+ this_cpu_inc(tx->stats->stat[STAT_SWITCHES]);
+ pr_info("TX(%s): key %d is activated!\n", tipc_own_id_string(tx->net),
+ key.pending);
+
+s1:
+ spin_unlock(&tx->lock);
+
+ /* RX key activating:
+ * The pending key (users > 0) -> active
+ * The active key if any -> passive, freed later
+ */
+ spin_lock(&rx->lock);
+ key = rx->key;
+ if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0)
+ goto s2;
+
+ new_pending = (key.passive &&
+ !tipc_aead_users(rx->aead[key.passive])) ?
+ key.passive : 0;
+ new_passive = (key.active) ?: ((new_pending) ? 0 : key.passive);
+ tipc_crypto_key_set_state(rx, new_passive, key.pending, new_pending);
+ this_cpu_inc(rx->stats->stat[STAT_SWITCHES]);
+ pr_info("RX(%s): key %d is activated!\n",
+ tipc_node_get_id_str(rx->node), key.pending);
+ goto s5;
+
+s2:
+ /* RX key "faulty" switching:
+ * The faulty pending key (users < -30) -> passive
+ * The passive key (users = 0) -> pending
+ * Note: This only happens after RX deactivated - s3!
+ */
+ key = rx->key;
+ if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -30)
+ goto s3;
+ if (!key.passive || tipc_aead_users(rx->aead[key.passive]) != 0)
+ goto s3;
+
+ new_pending = key.passive;
+ new_passive = key.pending;
+ tipc_crypto_key_set_state(rx, new_passive, key.active, new_pending);
+ goto s5;
+
+s3:
+ /* RX key deactivating:
+ * The passive key if any -> pending
+ * The active key -> passive (users = 0) / pending
+ * The pending key if any -> passive (users = 0)
+ */
+ key = rx->key;
+ if (!key.active)
+ goto s4;
+ if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM))
+ goto s4;
+
+ new_pending = (key.passive) ?: key.active;
+ new_passive = (key.passive) ? key.active : key.pending;
+ tipc_aead_users_set(rx->aead[new_pending], 0);
+ if (new_passive)
+ tipc_aead_users_set(rx->aead[new_passive], 0);
+ tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
+ pr_info("RX(%s): key %d is deactivated!\n",
+ tipc_node_get_id_str(rx->node), key.active);
+ goto s5;
+
+s4:
+ /* RX key passive -> freed: */
+ key = rx->key;
+ if (!key.passive || !tipc_aead_users(rx->aead[key.passive]))
+ goto s5;
+ if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM))
+ goto s5;
+
+ tipc_crypto_key_set_state(rx, 0, key.active, key.pending);
+ tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock);
+ pr_info("RX(%s): key %d is freed!\n", tipc_node_get_id_str(rx->node),
+ key.passive);
+
+s5:
+ spin_unlock(&rx->lock);
+
+ /* Limit max_tfms & do debug commands if needed */
+ if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM))
+ return;
+
+ cmd = sysctl_tipc_max_tfms;
+ sysctl_tipc_max_tfms = TIPC_MAX_TFMS_DEF;
+ tipc_crypto_do_cmd(rx->net, cmd);
+}
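For illustration, one possible TX sequence under the rules above, using the "[passive active pending]" notation of tipc_key_change_dump() further down: a new key 2 is attached as pending while key 1 is active, giving [- 1 2]; once a peer has taken key 2 into use (users > 0) and TIPC_TX_LASTING_LIM has elapsed since tx->timer1, the next timeout run promotes it:

	[- 1 2]  -->  [- 2 -]	/* key 1 is detached, then freed */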
+
+/**
+ * tipc_crypto_xmit - Build & encrypt TIPC message for xmit
+ * @net: struct net
+ * @skb: input/output message skb pointer
+ * @b: bearer used for xmit later
+ * @dst: destination media address
+ * @__dnode: destination node for reference if any
+ *
+ * First, build an encryption message header on top of the message, then
+ * encrypt the original TIPC message by using the active or pending TX key.
+ * If the encryption is successful, the encrypted skb is returned directly or
+ * via the callback.
+ * Otherwise, the skb is freed!
+ *
+ * Return:
+ * 0 : the encryption has succeeded (or no encryption)
+ * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
+ * -ENOKEY : the encryption has failed due to no key
+ * -EKEYREVOKED : the encryption has failed due to key revoked
+ * -ENOMEM : the encryption has failed due to no memory
+ * < 0 : the encryption has failed due to other reasons
+ */
+int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
+ struct tipc_bearer *b, struct tipc_media_addr *dst,
+ struct tipc_node *__dnode)
+{
+ struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ struct tipc_crypto_stats __percpu *stats = tx->stats;
+ struct tipc_key key = tx->key;
+ struct tipc_aead *aead = NULL;
+ struct sk_buff *probe;
+ int rc = -ENOKEY;
+ u8 tx_key;
+
+ /* No encryption? */
+ if (!tx->working)
+ return 0;
+
+ /* Try with the pending key if available and:
+ * 1) This is the only choice (i.e. no active key) or;
+ * 2) Peer has switched to this key (unicast only) or;
+ * 3) It is time to do a pending key probe;
+ */
+ if (unlikely(key.pending)) {
+ tx_key = key.pending;
+ if (!key.active)
+ goto encrypt;
+ if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key)
+ goto encrypt;
+ if (TIPC_SKB_CB(*skb)->probe)
+ goto encrypt;
+ if (!__rx &&
+ time_after(jiffies, tx->timer2 + TIPC_TX_PROBE_LIM)) {
+ tx->timer2 = jiffies;
+ probe = skb_clone(*skb, GFP_ATOMIC);
+ if (probe) {
+ TIPC_SKB_CB(probe)->probe = 1;
+ tipc_crypto_xmit(net, &probe, b, dst, __dnode);
+ if (probe)
+ b->media->send_msg(net, probe, b, dst);
+ }
+ }
+ }
+ /* Else, use the active key if any */
+ if (likely(key.active)) {
+ tx_key = key.active;
+ goto encrypt;
+ }
+ goto exit;
+
+encrypt:
+ aead = tipc_aead_get(tx->aead[tx_key]);
+ if (unlikely(!aead))
+ goto exit;
+ rc = tipc_ehdr_build(net, aead, tx_key, *skb, __rx);
+ if (likely(rc > 0))
+ rc = tipc_aead_encrypt(aead, *skb, b, dst, __dnode);
+
+exit:
+ switch (rc) {
+ case 0:
+ this_cpu_inc(stats->stat[STAT_OK]);
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ this_cpu_inc(stats->stat[STAT_ASYNC]);
+ *skb = NULL;
+ return rc;
+ default:
+ this_cpu_inc(stats->stat[STAT_NOK]);
+ if (rc == -ENOKEY)
+ this_cpu_inc(stats->stat[STAT_NOKEYS]);
+ else if (rc == -EKEYREVOKED)
+ this_cpu_inc(stats->stat[STAT_BADKEYS]);
+ kfree_skb(*skb);
+ *skb = NULL;
+ break;
+ }
+
+ tipc_aead_put(aead);
+ return rc;
+}
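A minimal caller sketch (the helper name my_bearer_xmit is hypothetical; in the patch the actual caller sits in the bearer layer), mirroring the probe transmission inside the function above and the return contract from its kernel-doc:

	static void my_bearer_xmit(struct net *net, struct sk_buff *skb,
				   struct tipc_bearer *b,
				   struct tipc_media_addr *dst,
				   struct tipc_node *dnode)
	{
		int rc = tipc_crypto_xmit(net, &skb, b, dst, dnode);

		/* rc == 0 with a non-NULL skb: encrypted (or crypto is off),
		 * ready for the wire. On -EINPROGRESS/-EBUSY the skb now
		 * belongs to the crypto layer and is sent from its callback;
		 * on any other error it has already been freed.
		 */
		if (!rc && skb)
			b->media->send_msg(net, skb, b, dst);
	}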
+
+/**
+ * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer
+ * @net: struct net
+ * @rx: RX crypto handle
+ * @skb: input/output message skb pointer
+ * @b: bearer where the message has been received
+ *
+ * If the decryption is successful, the decrypted skb is returned directly or
+ * via the callback; the encryption header and auth tag are trimmed off
+ * before the skb is forwarded to tipc_rcv() via tipc_crypto_rcv_complete().
+ * Otherwise, the skb will be freed!
+ * Note: RX key(s) can be re-aligned, or, if no RX key is suitable, the TX
+ * cluster key(s) can be tried for the decryption (recursively).
+ *
+ * Return:
+ * 0 : the decryption has successfully completed
+ * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
+ * -ENOKEY : the decryption has failed due to no key
+ * -EBADMSG : the decryption has failed due to bad message
+ * -ENOMEM : the decryption has failed due to no memory
+ * < 0 : the decryption has failed due to other reasons
+ */
+int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
+ struct sk_buff **skb, struct tipc_bearer *b)
+{
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ struct tipc_crypto_stats __percpu *stats;
+ struct tipc_aead *aead = NULL;
+ struct tipc_key key;
+ int rc = -ENOKEY;
+ u8 tx_key = 0;
+
+ /* New peer?
+ * Let's try with TX key (i.e. cluster mode) & verify the skb first!
+ */
+ if (unlikely(!rx))
+ goto pick_tx;
+
+ /* Pick RX key according to TX key, three cases are possible:
+ * 1) The current active key (likely) or;
+ * 2) The pending (new or deactivated) key (if any) or;
+ * 3) The passive or old active key (i.e. users > 0);
+ */
+ tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;
+ key = rx->key;
+ if (likely(tx_key == key.active))
+ goto decrypt;
+ if (tx_key == key.pending)
+ goto decrypt;
+ if (tx_key == key.passive) {
+ rx->timer2 = jiffies;
+ if (tipc_aead_users(rx->aead[key.passive]) > 0)
+ goto decrypt;
+ }
+
+ /* Unknown key, let's try to align RX key(s) */
+ if (tipc_crypto_key_try_align(rx, tx_key))
+ goto decrypt;
+
+pick_tx:
+ /* No key suitable? Try to pick one from TX... */
+ aead = tipc_crypto_key_pick_tx(tx, rx, *skb);
+ if (aead)
+ goto decrypt;
+ goto exit;
+
+decrypt:
+ rcu_read_lock();
+ if (!aead)
+ aead = tipc_aead_get(rx->aead[tx_key]);
+ rc = tipc_aead_decrypt(net, aead, *skb, b);
+ rcu_read_unlock();
+
+exit:
+ stats = ((rx) ?: tx)->stats;
+ switch (rc) {
+ case 0:
+ this_cpu_inc(stats->stat[STAT_OK]);
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ this_cpu_inc(stats->stat[STAT_ASYNC]);
+ *skb = NULL;
+ return rc;
+ default:
+ this_cpu_inc(stats->stat[STAT_NOK]);
+ if (rc == -ENOKEY) {
+ kfree_skb(*skb);
+ *skb = NULL;
+ if (rx)
+ tipc_node_put(rx->node);
+ this_cpu_inc(stats->stat[STAT_NOKEYS]);
+ return rc;
+ } else if (rc == -EBADMSG) {
+ this_cpu_inc(stats->stat[STAT_BADMSGS]);
+ }
+ break;
+ }
+
+ tipc_crypto_rcv_complete(net, aead, b, skb, rc);
+ return rc;
+}
+
+static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
+ struct tipc_bearer *b,
+ struct sk_buff **skb, int err)
+{
+ struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(*skb);
+ struct tipc_crypto *rx = aead->crypto;
+ struct tipc_aead *tmp = NULL;
+ struct tipc_ehdr *ehdr;
+ struct tipc_node *n;
+ u8 rx_key_active;
+ bool destined;
+
+ /* Is this completed by TX? */
+ if (unlikely(!rx->node)) {
+ rx = skb_cb->tx_clone_ctx.rx;
+#ifdef TIPC_CRYPTO_DEBUG
+ pr_info("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
+ (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
+ (*skb)->next, skb_cb->flags);
+ pr_info("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n",
+ skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last,
+ aead->crypto->aead[1], aead->crypto->aead[2],
+ aead->crypto->aead[3]);
+#endif
+ if (unlikely(err)) {
+ if (err == -EBADMSG && (*skb)->next)
+ tipc_rcv(net, (*skb)->next, b);
+ goto free_skb;
+ }
+
+ if (likely((*skb)->next)) {
+ kfree_skb((*skb)->next);
+ (*skb)->next = NULL;
+ }
+ ehdr = (struct tipc_ehdr *)(*skb)->data;
+ if (!rx) {
+ WARN_ON(ehdr->user != LINK_CONFIG);
+ n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0,
+ true);
+ rx = tipc_node_crypto_rx(n);
+ if (unlikely(!rx))
+ goto free_skb;
+ }
+
+ /* Skip cloning this time as we already have an RX pending key */
+ if (rx->key.pending)
+ goto rcv;
+ if (tipc_aead_clone(&tmp, aead) < 0)
+ goto rcv;
+ if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key) < 0) {
+ tipc_aead_free(&tmp->rcu);
+ goto rcv;
+ }
+ tipc_aead_put(aead);
+ aead = tipc_aead_get(tmp);
+ }
+
+ if (unlikely(err)) {
+ tipc_aead_users_dec(aead, INT_MIN);
+ goto free_skb;
+ }
+
+ /* Set the RX key's user */
+ tipc_aead_users_set(aead, 1);
+
+rcv:
+ /* Mark the time when RX last worked */
+ rx->timer1 = jiffies;
+
+ /* Remove ehdr & auth. tag prior to tipc_rcv() */
+ ehdr = (struct tipc_ehdr *)(*skb)->data;
+ destined = ehdr->destined;
+ rx_key_active = ehdr->rx_key_active;
+ skb_pull(*skb, tipc_ehdr_size(ehdr));
+ pskb_trim(*skb, (*skb)->len - aead->authsize);
+
+ /* Validate TIPCv2 message */
+ if (unlikely(!tipc_msg_validate(skb))) {
+ pr_err_ratelimited("Packet dropped after decryption!\n");
+ goto free_skb;
+ }
+
+ /* Update peer RX active key & TX users */
+ if (destined)
+ tipc_crypto_key_synch(rx, rx_key_active, buf_msg(*skb));
+
+ /* Mark skb decrypted */
+ skb_cb->decrypted = 1;
+
+ /* Clear clone cxt if any */
+ if (likely(!skb_cb->tx_clone_deferred))
+ goto exit;
+ skb_cb->tx_clone_deferred = 0;
+ memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
+ goto exit;
+
+free_skb:
+ kfree_skb(*skb);
+ *skb = NULL;
+
+exit:
+ tipc_aead_put(aead);
+ if (rx)
+ tipc_node_put(rx->node);
+}
+
+static void tipc_crypto_do_cmd(struct net *net, int cmd)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_crypto *tx = tn->crypto_tx, *rx;
+ struct list_head *p;
+ unsigned int stat;
+ int i, j, cpu;
+ char buf[200];
+
+ /* Currently only one command is supported */
+ switch (cmd) {
+ case 0xfff1:
+ goto print_stats;
+ default:
+ return;
+ }
+
+print_stats:
+ /* Print a header */
+ pr_info("\n=============== TIPC Crypto Statistics ===============\n\n");
+
+ /* Print key status */
+ pr_info("Key status:\n");
+ pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net),
+ tipc_crypto_key_dump(tx, buf));
+
+ rcu_read_lock();
+ for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
+ rx = tipc_node_crypto_rx_by_list(p);
+ pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node),
+ tipc_crypto_key_dump(rx, buf));
+ }
+ rcu_read_unlock();
+
+ /* Print crypto statistics */
+ for (i = 0, j = 0; i < MAX_STATS; i++)
+ j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]);
+ pr_info("\nCounter %s", buf);
+
+ memset(buf, '-', 115);
+ buf[115] = '\0';
+ pr_info("%s\n", buf);
+
+ j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net));
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < MAX_STATS; i++) {
+ stat = per_cpu_ptr(tx->stats, cpu)->stat[i];
+ j += scnprintf(buf + j, 200 - j, "|%11d ", stat);
+ }
+ pr_info("%s", buf);
+ j = scnprintf(buf, 200, "%12s", " ");
+ }
+
+ rcu_read_lock();
+ for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
+ rx = tipc_node_crypto_rx_by_list(p);
+ j = scnprintf(buf, 200, "RX(%7.7s) ",
+ tipc_node_get_id_str(rx->node));
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < MAX_STATS; i++) {
+ stat = per_cpu_ptr(rx->stats, cpu)->stat[i];
+ j += scnprintf(buf + j, 200 - j, "|%11d ",
+ stat);
+ }
+ pr_info("%s", buf);
+ j = scnprintf(buf, 200, "%12s", " ");
+ }
+ }
+ rcu_read_unlock();
+
+ pr_info("\n======================== Done ========================\n");
+}
+
+static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
+{
+ struct tipc_key key = c->key;
+ struct tipc_aead *aead;
+ int k, i = 0;
+ char *s;
+
+ for (k = KEY_MIN; k <= KEY_MAX; k++) {
+ if (k == key.passive)
+ s = "PAS";
+ else if (k == key.active)
+ s = "ACT";
+ else if (k == key.pending)
+ s = "PEN";
+ else
+ s = "-";
+ i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s);
+
+ rcu_read_lock();
+ aead = rcu_dereference(c->aead[k]);
+ if (aead)
+ i += scnprintf(buf + i, 200 - i,
+ "{\"%s...\", \"%s\"}/%d:%d",
+ aead->hint,
+ (aead->mode == CLUSTER_KEY) ? "c" : "p",
+ atomic_read(&aead->users),
+ refcount_read(&aead->refcnt));
+ rcu_read_unlock();
+ i += scnprintf(buf + i, 200 - i, "\n");
+ }
+
+ if (c->node)
+ i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n",
+ atomic_read(&c->peer_rx_active));
+
+ return buf;
+}
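Given the format strings above, a dump for an RX handle might look like this (hint and counter values illustrative):

	Key1: PAS{"3c8f2a...", "p"}/0:1
	Key2: ACT{"9b01de...", "p"}/1:2
	Key3: -
	Peer RX active: 2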
+
+#ifdef TIPC_CRYPTO_DEBUG
+static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
+ char *buf)
+{
+ struct tipc_key *key = &old;
+ int k, i = 0;
+ char *s;
+
+ /* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */
+again:
+ i += scnprintf(buf + i, 32 - i, "[");
+ for (k = KEY_MIN; k <= KEY_MAX; k++) {
+ if (k == key->passive)
+ s = "pas";
+ else if (k == key->active)
+ s = "act";
+ else if (k == key->pending)
+ s = "pen";
+ else
+ s = "-";
+ i += scnprintf(buf + i, 32 - i,
+ (k != KEY_MAX) ? "%s " : "%s", s);
+ }
+ if (key != &new) {
+ i += scnprintf(buf + i, 32 - i, "] -> ");
+ key = &new;
+ goto again;
+ }
+ i += scnprintf(buf + i, 32 - i, "]");
+ return buf;
+}
+#endif
diff --git a/net/tipc/crypto.h b/net/tipc/crypto.h
new file mode 100644
index 000000000000..c3de769f49e8
--- /dev/null
+++ b/net/tipc/crypto.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * net/tipc/crypto.h: Include file for TIPC crypto
+ *
+ * Copyright (c) 2019, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef CONFIG_TIPC_CRYPTO
+#ifndef _TIPC_CRYPTO_H
+#define _TIPC_CRYPTO_H
+
+#include "core.h"
+#include "node.h"
+#include "msg.h"
+#include "bearer.h"
+
+#define TIPC_EVERSION 7
+
+/* AEAD aes(gcm) */
+#define TIPC_AES_GCM_KEY_SIZE_128 16
+#define TIPC_AES_GCM_KEY_SIZE_192 24
+#define TIPC_AES_GCM_KEY_SIZE_256 32
+
+#define TIPC_AES_GCM_SALT_SIZE 4
+#define TIPC_AES_GCM_IV_SIZE 12
+#define TIPC_AES_GCM_TAG_SIZE 16
+
+/**
+ * TIPC crypto modes:
+ * - CLUSTER_KEY:
+ * A single key is used for both TX & RX in all nodes in the cluster.
+ * - PER_NODE_KEY:
+ * Each node in the cluster has its own TX key; for RX, a node needs to
+ * know its peers' TX keys in order to decrypt messages from those nodes.
+ */
+enum {
+ CLUSTER_KEY = 1,
+ PER_NODE_KEY = (1 << 1),
+};
+
+extern int sysctl_tipc_max_tfms __read_mostly;
+
+/**
+ * TIPC encryption message format:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w0:|Ver=7| User |D|TX |RX |K| Rsvd |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w1:| Seqno |
+ * w2:| (8 octets) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w3:\ Prevnode \
+ * / (4 or 16 octets) /
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ \
+ * / Encrypted complete TIPC V2 header and user data /
+ * \ \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * | AuthTag |
+ * | (16 octets) |
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Word0:
+ * Ver : = 7 i.e. TIPC encryption message version
+ * User : = 7 (for LINK_PROTOCOL); = 13 (for LINK_CONFIG) or = 0
+ * D : The destined bit, i.e. whether the message's destination node
+ * is known at the time of encryption
+ * TX : TX key used for the message encryption
+ * RX : Currently RX active key corresponding to the destination
+ * node's TX key (when the "D" bit is set)
+ * K : Keep-alive bit (for RPS, LINK_PROTOCOL/STATE_MSG only)
+ * Rsvd : Reserved bits and fields
+ * Word1-2:
+ * Seqno : The 64-bit sequence number of the encrypted message, also
+ * part of the nonce used for the message encryption/decryption
+ * Word3-:
+ * Prevnode: The source node address, or the node ID (for LINK_CONFIG only)
+ * AuthTag : The authentication tag for the message integrity checking
+ * generated by the message encryption
+ */
+struct tipc_ehdr {
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 destined:1,
+ user:4,
+ version:3;
+ __u8 reserved_1:3,
+ keepalive:1,
+ rx_key_active:2,
+ tx_key:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 version:3,
+ user:4,
+ destined:1;
+ __u8 tx_key:2,
+ rx_key_active:2,
+ keepalive:1,
+ reserved_1:3;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be16 reserved_2;
+ } __packed;
+ __be32 w0;
+ };
+ __be64 seqno;
+ union {
+ __be32 addr;
+ __u8 id[NODE_ID_LEN]; /* For a LINK_CONFIG message only! */
+ };
+#define EHDR_SIZE (offsetof(struct tipc_ehdr, addr) + sizeof(__be32))
+#define EHDR_CFG_SIZE (sizeof(struct tipc_ehdr))
+#define EHDR_MIN_SIZE (EHDR_SIZE)
+#define EHDR_MAX_SIZE (EHDR_CFG_SIZE)
+#define EMSG_OVERHEAD (EHDR_SIZE + TIPC_AES_GCM_TAG_SIZE)
+} __packed;
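As a worked size check (the BUILD_BUG_ON() lines are illustrative only, e.g. for an init function, and are not part of the header): w0 (4 octets) plus the 64-bit seqno (8 octets) make 12 octets of fixed header, so:

	BUILD_BUG_ON(EHDR_SIZE != 16);		/* 12 + 4-octet legacy address */
	BUILD_BUG_ON(EHDR_CFG_SIZE != 28);	/* 12 + 16-octet node ID (LINK_CONFIG) */
	BUILD_BUG_ON(EMSG_OVERHEAD != 32);	/* EHDR_SIZE + 16-octet auth tag */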
+
+int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
+ struct tipc_node *node);
+void tipc_crypto_stop(struct tipc_crypto **crypto);
+void tipc_crypto_timeout(struct tipc_crypto *rx);
+int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
+ struct tipc_bearer *b, struct tipc_media_addr *dst,
+ struct tipc_node *__dnode);
+int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
+ struct sk_buff **skb, struct tipc_bearer *b);
+int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
+ u8 mode);
+void tipc_crypto_key_flush(struct tipc_crypto *c);
+int tipc_aead_key_validate(struct tipc_aead_key *ukey);
+bool tipc_ehdr_validate(struct sk_buff *skb);
+
+#endif /* _TIPC_CRYPTO_H */
+#endif
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 038861bad72b..fb72031228c9 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -44,6 +44,7 @@
#include "netlink.h"
#include "monitor.h"
#include "trace.h"
+#include "crypto.h"
#include <linux/pkt_sched.h>
@@ -397,6 +398,15 @@ int tipc_link_mtu(struct tipc_link *l)
return l->mtu;
}
+int tipc_link_mss(struct tipc_link *l)
+{
+#ifdef CONFIG_TIPC_CRYPTO
+ return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
+#else
+ return l->mtu - INT_H_SIZE;
+#endif
+}
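A worked example of the two branches (the MTU value is assumed; INT_H_SIZE is 40 octets):

	/* e.g. an Ethernet bearer with l->mtu = 1500:
	 *   without crypto: 1500 - 40      = 1460 octets
	 *   with crypto:    1500 - 40 - 32 = 1428 octets (EMSG_OVERHEAD)
	 */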
+
u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
return l->rcv_nxt;
@@ -948,6 +958,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
u16 seqno = l->snd_nxt;
int pkt_cnt = skb_queue_len(list);
int imp = msg_importance(hdr);
+ unsigned int mss = tipc_link_mss(l);
unsigned int maxwin = l->window;
unsigned int mtu = l->mtu;
bool new_bundle;
@@ -1000,8 +1011,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
continue;
}
if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
- mtu - INT_H_SIZE, l->addr,
- &new_bundle)) {
+ mss, l->addr, &new_bundle)) {
if (skb) {
/* Keep a ref. to the skb for next try */
l->backlog[imp].target_bskb = skb;
@@ -1087,7 +1097,7 @@ static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
return false;
if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
- msecs_to_jiffies(r->tolerance)))
+ msecs_to_jiffies(r->tolerance * 10)))
return false;
hdr = buf_msg(skb);
@@ -1154,7 +1164,7 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
continue;
TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
- _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC);
+ _skb = pskb_copy(skb, GFP_ATOMIC);
if (!_skb)
return 0;
hdr = buf_msg(_skb);
@@ -1430,8 +1440,7 @@ next_gap_ack:
if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
continue;
TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
- _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE,
- GFP_ATOMIC);
+ _skb = pskb_copy(skb, GFP_ATOMIC);
if (!_skb)
continue;
hdr = buf_msg(_skb);
@@ -1731,21 +1740,6 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
return;
__skb_queue_head_init(&tnlq);
- __skb_queue_head_init(&tmpxq);
- __skb_queue_head_init(&frags);
-
- /* At least one packet required for safe algorithm => add dummy */
- skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
- BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
- 0, 0, TIPC_ERR_NO_PORT);
- if (!skb) {
- pr_warn("%sunable to create tunnel packet\n", link_co_err);
- return;
- }
- __skb_queue_tail(&tnlq, skb);
- tipc_link_xmit(l, &tnlq, &tmpxq);
- __skb_queue_purge(&tmpxq);
-
/* Link Synching:
* From now on, send only one single ("dummy") SYNCH message
* to peer. The SYNCH message does not contain any data, just
@@ -1771,6 +1765,20 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
return;
}
+ __skb_queue_head_init(&tmpxq);
+ __skb_queue_head_init(&frags);
+ /* At least one packet required for safe algorithm => add dummy */
+ skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+ BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
+ 0, 0, TIPC_ERR_NO_PORT);
+ if (!skb) {
+ pr_warn("%sunable to create tunnel packet\n", link_co_err);
+ return;
+ }
+ __skb_queue_tail(&tnlq, skb);
+ tipc_link_xmit(l, &tnlq, &tmpxq);
+ __skb_queue_purge(&tmpxq);
+
/* Initialize reusable tunnel packet header */
tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
mtyp, INT_H_SIZE, l->addr);
diff --git a/net/tipc/link.h b/net/tipc/link.h
index adcad65e761c..c09e9d49d0a3 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -141,6 +141,7 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
int tipc_link_bc_peers(struct tipc_link *l);
void tipc_link_set_mtu(struct tipc_link *l, int mtu);
int tipc_link_mtu(struct tipc_link *l);
+int tipc_link_mss(struct tipc_link *l);
void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
struct sk_buff_head *xmitq);
void tipc_link_build_bc_sync_msg(struct tipc_link *l,
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 6a6eae88442f..58708b4c7719 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -665,6 +665,21 @@ void tipc_mon_delete(struct net *net, int bearer_id)
kfree(mon);
}
+void tipc_mon_reinit_self(struct net *net)
+{
+ struct tipc_monitor *mon;
+ int bearer_id;
+
+ for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ mon = tipc_monitor(net, bearer_id);
+ if (!mon)
+ continue;
+ write_lock_bh(&mon->lock);
+ mon->self->addr = tipc_own_addr(net);
+ write_unlock_bh(&mon->lock);
+ }
+}
+
int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size)
{
struct tipc_net *tn = tipc_net(net);
diff --git a/net/tipc/monitor.h b/net/tipc/monitor.h
index 2a21b93e0d04..ed63d2e650b0 100644
--- a/net/tipc/monitor.h
+++ b/net/tipc/monitor.h
@@ -77,6 +77,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
u32 bearer_id);
int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
u32 bearer_id, u32 *prev_node);
+void tipc_mon_reinit_self(struct net *net);
extern const int tipc_max_domain_size;
#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index acb7be592fb1..0d515d20b056 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -39,10 +39,16 @@
#include "msg.h"
#include "addr.h"
#include "name_table.h"
+#include "crypto.h"
#define MAX_FORWARD_SIZE 1024
+#ifdef CONFIG_TIPC_CRYPTO
+#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
+#define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE)
+#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_TAILROOM 16
+#endif
static unsigned int align(unsigned int i)
{
@@ -61,7 +67,11 @@ static unsigned int align(unsigned int i)
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
struct sk_buff *skb;
+#ifdef CONFIG_TIPC_CRYPTO
+ unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
+#else
unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
+#endif
skb = alloc_skb_fclone(buf_size, gfp);
if (skb) {
@@ -173,7 +183,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
}
if (fragid == LAST_FRAGMENT) {
- TIPC_SKB_CB(head)->validated = false;
+ TIPC_SKB_CB(head)->validated = 0;
if (unlikely(!tipc_msg_validate(&head)))
goto err;
*buf = head;
@@ -271,6 +281,7 @@ bool tipc_msg_validate(struct sk_buff **_skb)
if (unlikely(TIPC_SKB_CB(skb)->validated))
return true;
+
if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
return false;
@@ -292,7 +303,7 @@ bool tipc_msg_validate(struct sk_buff **_skb)
if (unlikely(skb->len < msz))
return false;
- TIPC_SKB_CB(skb)->validated = true;
+ TIPC_SKB_CB(skb)->validated = 1;
return true;
}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 14697e6c995e..6d466ebdb64f 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -102,16 +102,42 @@ struct plist;
#define TIPC_MEDIA_INFO_OFFSET 5
struct tipc_skb_cb {
- struct sk_buff *tail;
- unsigned long nxt_retr;
- unsigned long retr_stamp;
- u32 bytes_read;
- u32 orig_member;
- u16 chain_imp;
- u16 ackers;
- u16 retr_cnt;
- bool validated;
-};
+ union {
+ struct {
+ struct sk_buff *tail;
+ unsigned long nxt_retr;
+ unsigned long retr_stamp;
+ u32 bytes_read;
+ u32 orig_member;
+ u16 chain_imp;
+ u16 ackers;
+ u16 retr_cnt;
+ } __packed;
+#ifdef CONFIG_TIPC_CRYPTO
+ struct {
+ struct tipc_crypto *rx;
+ struct tipc_aead *last;
+ u8 recurs;
+ } tx_clone_ctx __packed;
+#endif
+ } __packed;
+ union {
+ struct {
+ u8 validated:1;
+#ifdef CONFIG_TIPC_CRYPTO
+ u8 encrypted:1;
+ u8 decrypted:1;
+ u8 probe:1;
+ u8 tx_clone_deferred:1;
+#endif
+ };
+ u8 flags;
+ };
+ u8 reserved;
+#ifdef CONFIG_TIPC_CRYPTO
+ void *crypto_ctx;
+#endif
+} __packed;
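Since the five control bits share a union with the flags byte, a receive path can reset them all with a single store before handing the buffer to tipc_rcv(), which is exactly what tipc_udp_recv() does later in this patch:

	TIPC_SKB_CB(skb)->flags = 0;	/* clears validated, encrypted, decrypted,
					 * probe and tx_clone_deferred at once
					 */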
#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 85707c185360..2de3cec9929d 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -42,6 +42,7 @@
#include "node.h"
#include "bcast.h"
#include "netlink.h"
+#include "monitor.h"
/*
* The TIPC locking policy is designed to ensure a very fine locking
@@ -136,6 +137,7 @@ static void tipc_net_finalize(struct net *net, u32 addr)
tipc_set_node_addr(net, addr);
tipc_named_reinit(net);
tipc_sk_reinit(net);
+ tipc_mon_reinit_self(net);
tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
TIPC_CLUSTER_SCOPE, 0, addr);
}
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index d32bbd0f5e46..e53231bd23b4 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -102,7 +102,11 @@ const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
[TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC },
[TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 },
- [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG }
+ [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG },
+ [TIPC_NLA_NODE_ID] = { .type = NLA_BINARY,
+ .len = TIPC_NODEID_LEN},
+ [TIPC_NLA_NODE_KEY] = { .type = NLA_BINARY,
+ .len = TIPC_AEAD_KEY_SIZE_MAX},
};
/* Properties valid for media, bearer and link */
@@ -257,6 +261,18 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
.dumpit = tipc_udp_nl_dump_remoteip,
},
#endif
+#ifdef CONFIG_TIPC_CRYPTO
+ {
+ .cmd = TIPC_NL_KEY_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_node_set_key,
+ },
+ {
+ .cmd = TIPC_NL_KEY_FLUSH,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_node_flush_key,
+ },
+#endif
};
struct genl_family tipc_genl_family __ro_after_init = {
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 4b60928049ea..aaf595613e6e 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -44,6 +44,7 @@
#include "discover.h"
#include "netlink.h"
#include "trace.h"
+#include "crypto.h"
#define INVALID_NODE_SIG 0x10000
#define NODE_CLEANUP_AFTER 300000
@@ -89,6 +90,7 @@ struct tipc_bclink_entry {
* @links: array containing references to all links to node
* @action_flags: bit mask of different types of node actions
* @state: connectivity state vs peer node
+ * @preliminary: whether the node is still preliminary (address not yet known)
* @sync_point: sequence number where synch/failover is finished
* @list: links to adjacent nodes in sorted list of cluster's nodes
* @working_links: number of working links to node (both active and standby)
@@ -99,6 +101,7 @@ struct tipc_bclink_entry {
* @publ_list: list of publications
* @rcu: rcu struct for tipc_node
* @delete_at: indicates the time for deleting a down node
+ * @crypto_rx: RX crypto handler
*/
struct tipc_node {
u32 addr;
@@ -112,6 +115,7 @@ struct tipc_node {
int action_flags;
struct list_head list;
int state;
+ bool preliminary;
bool failover_sent;
u16 sync_point;
int link_cnt;
@@ -120,6 +124,7 @@ struct tipc_node {
u32 signature;
u32 link_id;
u8 peer_id[16];
+ char peer_id_string[NODE_ID_STR_LEN];
struct list_head publ_list;
struct list_head conn_sks;
unsigned long keepalive_intv;
@@ -128,6 +133,9 @@ struct tipc_node {
unsigned long delete_at;
struct net *peer_net;
u32 peer_hash_mix;
+#ifdef CONFIG_TIPC_CRYPTO
+ struct tipc_crypto *crypto_rx;
+#endif
};
/* Node FSM states and events:
@@ -165,7 +173,6 @@ static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
-static void tipc_node_put(struct tipc_node *node);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);
@@ -245,15 +252,51 @@ u16 tipc_node_get_capabilities(struct net *net, u32 addr)
return caps;
}
+u32 tipc_node_get_addr(struct tipc_node *node)
+{
+ return (node) ? node->addr : 0;
+}
+
+char *tipc_node_get_id_str(struct tipc_node *node)
+{
+ return node->peer_id_string;
+}
+
+#ifdef CONFIG_TIPC_CRYPTO
+/**
+ * tipc_node_crypto_rx - Retrieve crypto RX handle from node
+ * Note: node ref counter must be held first!
+ */
+struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
+{
+ return (__n) ? __n->crypto_rx : NULL;
+}
+
+struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
+{
+ return container_of(pos, struct tipc_node, list)->crypto_rx;
+}
+#endif
+
+void tipc_node_free(struct rcu_head *rp)
+{
+ struct tipc_node *n = container_of(rp, struct tipc_node, rcu);
+
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_stop(&n->crypto_rx);
+#endif
+ kfree(n);
+}
+
static void tipc_node_kref_release(struct kref *kref)
{
struct tipc_node *n = container_of(kref, struct tipc_node, kref);
kfree(n->bc_entry.link);
- kfree_rcu(n, rcu);
+ call_rcu(&n->rcu, tipc_node_free);
}
-static void tipc_node_put(struct tipc_node *node)
+void tipc_node_put(struct tipc_node *node)
{
kref_put(&node->kref, tipc_node_kref_release);
}
@@ -274,7 +317,7 @@ static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
rcu_read_lock();
hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
- if (node->addr != addr)
+ if (node->addr != addr || node->preliminary)
continue;
if (!kref_get_unless_zero(&node->kref))
node = NULL;
@@ -398,19 +441,41 @@ static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
}
}
-static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
- u8 *peer_id, u16 capabilities,
- u32 signature, u32 hash_mixes)
+struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
+ u16 capabilities, u32 hash_mixes,
+ bool preliminary)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *n, *temp_node;
struct tipc_link *l;
+ unsigned long intv;
int bearer_id;
int i;
spin_lock_bh(&tn->node_list_lock);
- n = tipc_node_find(net, addr);
+ n = tipc_node_find(net, addr) ?:
+ tipc_node_find_by_id(net, peer_id);
if (n) {
+ if (!n->preliminary)
+ goto update;
+ if (preliminary)
+ goto exit;
+ /* A preliminary node becomes "real" now, refresh its data */
+ tipc_node_write_lock(n);
+ n->preliminary = false;
+ n->addr = addr;
+ hlist_del_rcu(&n->hash);
+ hlist_add_head_rcu(&n->hash,
+ &tn->node_htable[tipc_hashfn(addr)]);
+ list_del_rcu(&n->list);
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ if (n->addr < temp_node->addr)
+ break;
+ }
+ list_add_tail_rcu(&n->list, &temp_node->list);
+ tipc_node_write_unlock_fast(n);
+
+update:
if (n->peer_hash_mix ^ hash_mixes)
tipc_node_assign_peer_net(n, hash_mixes);
if (n->capabilities == capabilities)
@@ -438,7 +503,17 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
pr_warn("Node creation failed, no memory\n");
goto exit;
}
+ tipc_nodeid2string(n->peer_id_string, peer_id);
+#ifdef CONFIG_TIPC_CRYPTO
+ if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
+ pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
+ kfree(n);
+ n = NULL;
+ goto exit;
+ }
+#endif
n->addr = addr;
+ n->preliminary = preliminary;
memcpy(&n->peer_id, peer_id, 16);
n->net = net;
n->peer_net = NULL;
@@ -463,26 +538,14 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
n->signature = INVALID_NODE_SIG;
n->active_links[0] = INVALID_BEARER_ID;
n->active_links[1] = INVALID_BEARER_ID;
- if (!tipc_link_bc_create(net, tipc_own_addr(net),
- addr, U16_MAX,
- tipc_link_window(tipc_bc_sndlink(net)),
- n->capabilities,
- &n->bc_entry.inputq1,
- &n->bc_entry.namedq,
- tipc_bc_sndlink(net),
- &n->bc_entry.link)) {
- pr_warn("Broadcast rcv link creation failed, no memory\n");
- if (n->peer_net) {
- n->peer_net = NULL;
- n->peer_hash_mix = 0;
- }
- kfree(n);
- n = NULL;
- goto exit;
- }
+ n->bc_entry.link = NULL;
tipc_node_get(n);
timer_setup(&n->timer, tipc_node_timeout, 0);
- n->keepalive_intv = U32_MAX;
+ /* Start a slow timer anyway, crypto needs it */
+ n->keepalive_intv = 10000;
+ intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
+ if (!mod_timer(&n->timer, intv))
+ tipc_node_get(n);
hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
if (n->addr < temp_node->addr)
@@ -667,6 +730,11 @@ static bool tipc_node_cleanup(struct tipc_node *peer)
}
tipc_node_write_unlock(peer);
+ if (!deleted) {
+ spin_unlock_bh(&tn->node_list_lock);
+ return deleted;
+ }
+
/* Calculate cluster capabilities */
tn->capabilities = TIPC_NODE_CAPABILITIES;
list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
@@ -695,6 +763,10 @@ static void tipc_node_timeout(struct timer_list *t)
return;
}
+#ifdef CONFIG_TIPC_CRYPTO
+ /* Take any crypto key related actions first */
+ tipc_crypto_timeout(n->crypto_rx);
+#endif
__skb_queue_head_init(&xmitq);
/* Initial node interval to value larger (10 seconds), then it will be
@@ -715,7 +787,7 @@ static void tipc_node_timeout(struct timer_list *t)
remains--;
}
tipc_node_read_unlock(n);
- tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
+ tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
if (rc & TIPC_LINK_DOWN_EVT)
tipc_node_link_down(n, bearer_id, false);
}
@@ -747,7 +819,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
n->link_id = tipc_link_id(nl);
/* Leave room for tunnel header when returning 'mtu' to users: */
- n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;
+ n->links[bearer_id].mtu = tipc_link_mss(nl);
tipc_bearer_add_dest(n->net, bearer_id, n->addr);
tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
@@ -801,7 +873,7 @@ static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
tipc_node_write_lock(n);
__tipc_node_link_up(n, bearer_id, xmitq);
maddr = &n->links[bearer_id].maddr;
- tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr);
+ tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
tipc_node_write_unlock(n);
}
@@ -956,7 +1028,7 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
if (delete)
tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
if (!skb_queue_empty(&xmitq))
- tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+ tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
tipc_sk_rcv(n->net, &le->inputq);
}
@@ -1000,6 +1072,8 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
struct tipc_net *tn = tipc_net(net);
struct tipc_node *n;
+ bool preliminary;
+ u32 sugg_addr;
/* Suggest new address if some other peer is using this one */
n = tipc_node_find(net, addr);
@@ -1015,9 +1089,11 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
/* Suggest previously used address if peer is known */
n = tipc_node_find_by_id(net, id);
if (n) {
- addr = n->addr;
+ sugg_addr = n->addr;
+ preliminary = n->preliminary;
tipc_node_put(n);
- return addr;
+ if (!preliminary)
+ return sugg_addr;
}
/* Even this node may be in conflict */
@@ -1034,7 +1110,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
bool *respond, bool *dupl_addr)
{
struct tipc_node *n;
- struct tipc_link *l;
+ struct tipc_link *l, *snd_l;
struct tipc_link_entry *le;
bool addr_match = false;
bool sign_match = false;
@@ -1048,12 +1124,27 @@ void tipc_node_check_dest(struct net *net, u32 addr,
*dupl_addr = false;
*respond = false;
- n = tipc_node_create(net, addr, peer_id, capabilities, signature,
- hash_mixes);
+ n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
+ false);
if (!n)
return;
tipc_node_write_lock(n);
+ if (unlikely(!n->bc_entry.link)) {
+ snd_l = tipc_bc_sndlink(net);
+ if (!tipc_link_bc_create(net, tipc_own_addr(net),
+ addr, U16_MAX,
+ tipc_link_window(snd_l),
+ n->capabilities,
+ &n->bc_entry.inputq1,
+ &n->bc_entry.namedq, snd_l,
+ &n->bc_entry.link)) {
+ pr_warn("Broadcast rcv link creation failed, no mem\n");
+ tipc_node_write_unlock_fast(n);
+ tipc_node_put(n);
+ return;
+ }
+ }
le = &n->links[b->identity];
@@ -1068,6 +1159,9 @@ void tipc_node_check_dest(struct net *net, u32 addr,
if (sign_match && addr_match && link_up) {
/* All is fine. Do nothing. */
reset = false;
+ /* Peer node is not a container/local namespace */
+ if (!n->peer_hash_mix)
+ n->peer_hash_mix = hash_mixes;
} else if (sign_match && addr_match && !link_up) {
/* Respond. The link will come up in due time */
*respond = true;
@@ -1393,11 +1487,8 @@ static void node_lost_contact(struct tipc_node *n,
/* Notify publications from this node */
n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
-
- if (n->peer_net) {
- n->peer_net = NULL;
- n->peer_hash_mix = 0;
- }
+ n->peer_net = NULL;
+ n->peer_hash_mix = 0;
/* Notify sockets connected to node */
list_for_each_entry_safe(conn, safe, conns, list) {
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
@@ -1591,7 +1682,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
if (unlikely(rc == -ENOBUFS))
tipc_node_link_down(n, bearer_id, false);
else
- tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
tipc_node_put(n);
@@ -1739,7 +1830,7 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
}
if (!skb_queue_empty(&xmitq))
- tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
if (!skb_queue_empty(&be->inputq1))
tipc_node_mcast_rcv(n);
@@ -1917,20 +2008,38 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
struct sk_buff_head xmitq;
- struct tipc_node *n;
+ struct tipc_link_entry *le;
struct tipc_msg *hdr;
+ struct tipc_node *n;
int bearer_id = b->identity;
- struct tipc_link_entry *le;
u32 self = tipc_own_addr(net);
int usr, rc = 0;
u16 bc_ack;
+#ifdef CONFIG_TIPC_CRYPTO
+ struct tipc_ehdr *ehdr;
- __skb_queue_head_init(&xmitq);
+ /* Check if message must be decrypted first */
+ if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
+ goto rcv;
+
+ ehdr = (struct tipc_ehdr *)skb->data;
+ if (likely(ehdr->user != LINK_CONFIG)) {
+ n = tipc_node_find(net, ntohl(ehdr->addr));
+ if (unlikely(!n))
+ goto discard;
+ } else {
+ n = tipc_node_find_by_id(net, ehdr->id);
+ }
+ tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
+ if (!skb)
+ return;
+rcv:
+#endif
/* Ensure message is well-formed before touching the header */
- TIPC_SKB_CB(skb)->validated = false;
if (unlikely(!tipc_msg_validate(&skb)))
goto discard;
+ __skb_queue_head_init(&xmitq);
hdr = buf_msg(skb);
usr = msg_user(hdr);
bc_ack = msg_bcast_ack(hdr);
@@ -2001,7 +2110,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
tipc_sk_rcv(net, &le->inputq);
if (!skb_queue_empty(&xmitq))
- tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
tipc_node_put(n);
discard:
@@ -2032,7 +2141,7 @@ void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
tipc_link_set_mtu(e->link, b->mtu);
}
tipc_node_write_unlock(n);
- tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
}
rcu_read_unlock();
@@ -2043,7 +2152,7 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
struct net *net = sock_net(skb->sk);
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
- struct tipc_node *peer;
+ struct tipc_node *peer, *temp_node;
u32 addr;
int err;
@@ -2084,6 +2193,11 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
tipc_node_write_unlock(peer);
tipc_node_delete(peer);
+ /* Calculate cluster capabilities */
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
err = 0;
err_out:
tipc_node_put(peer);
@@ -2128,6 +2242,8 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
}
list_for_each_entry_rcu(node, &tn->node_list, list) {
+ if (node->preliminary)
+ continue;
if (last_addr) {
if (node->addr == last_addr)
last_addr = 0;
@@ -2267,7 +2383,8 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
out:
tipc_node_read_unlock(node);
- tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
+ NULL);
return res;
}
@@ -2643,10 +2760,140 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
return skb->len;
}
-u32 tipc_node_get_addr(struct tipc_node *node)
+#ifdef CONFIG_TIPC_CRYPTO
+static int tipc_nl_retrieve_key(struct nlattr **attrs,
+ struct tipc_aead_key **key)
{
- return (node) ? node->addr : 0;
+ struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
+
+ if (!attr)
+ return -ENODATA;
+
+ *key = (struct tipc_aead_key *)nla_data(attr);
+ if (nla_len(attr) < tipc_aead_key_size(*key))
+ return -EINVAL;
+
+ return 0;
+}
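For reference, a hedged userspace sketch of the blob expected in TIPC_NLA_NODE_KEY (struct tipc_aead_key and its field names come from the TIPC uapi header added by this series; the size macros are the kernel-internal ones from crypto.h, spelled out here only for clarity). Note that keylen counts the key material plus the 4-octet salt, as checked by tipc_aead_key_validate():

	size_t len = TIPC_AES_GCM_KEY_SIZE_128 + TIPC_AES_GCM_SALT_SIZE; /* 20 */
	struct tipc_aead_key *k = calloc(1, sizeof(*k) + len);

	snprintf(k->alg_name, sizeof(k->alg_name), "gcm(aes)");
	k->keylen = len;
	memcpy(k->key, key_and_salt, len);	/* 16-octet AES-128 key + 4-octet salt */
	/* ...then attach as TIPC_NLA_NODE_KEY, nested inside TIPC_NLA_NODE */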
+
+static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
+{
+ struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];
+
+ if (!attr)
+ return -ENODATA;
+
+ if (nla_len(attr) < TIPC_NODEID_LEN)
+ return -EINVAL;
+
+ *node_id = (u8 *)nla_data(attr);
+ return 0;
+}
+
+int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_node *n = NULL;
+ struct tipc_aead_key *ukey;
+ struct tipc_crypto *c;
+ u8 *id, *own_id;
+ int rc = 0;
+
+ if (!info->attrs[TIPC_NLA_NODE])
+ return -EINVAL;
+
+ rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
+ info->attrs[TIPC_NLA_NODE],
+ tipc_nl_node_policy, info->extack);
+ if (rc)
+ goto exit;
+
+ own_id = tipc_own_id(net);
+ if (!own_id) {
+ rc = -EPERM;
+ goto exit;
+ }
+
+ rc = tipc_nl_retrieve_key(attrs, &ukey);
+ if (rc)
+ goto exit;
+
+ rc = tipc_aead_key_validate(ukey);
+ if (rc)
+ goto exit;
+
+ rc = tipc_nl_retrieve_nodeid(attrs, &id);
+ switch (rc) {
+ case -ENODATA:
+ /* Cluster key mode */
+ rc = tipc_crypto_key_init(tn->crypto_tx, ukey, CLUSTER_KEY);
+ break;
+ case 0:
+ /* Per-node key mode */
+ if (!memcmp(id, own_id, NODE_ID_LEN)) {
+ c = tn->crypto_tx;
+ } else {
+ n = tipc_node_find_by_id(net, id) ?:
+ tipc_node_create(net, 0, id, 0xffffu, 0, true);
+ if (unlikely(!n)) {
+ rc = -ENOMEM;
+ break;
+ }
+ c = n->crypto_rx;
+ }
+
+ rc = tipc_crypto_key_init(c, ukey, PER_NODE_KEY);
+ if (n)
+ tipc_node_put(n);
+ break;
+ default:
+ break;
+ }
+
+exit:
+ return (rc < 0) ? rc : 0;
+}
+
+int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_node_set_key(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
+
+int __tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_node *n;
+
+ tipc_crypto_key_flush(tn->crypto_tx);
+ rcu_read_lock();
+ list_for_each_entry_rcu(n, &tn->node_list, list)
+ tipc_crypto_key_flush(n->crypto_rx);
+ rcu_read_unlock();
+
+ pr_info("All keys are flushed!\n");
+ return 0;
+}
+
+int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_node_flush_key(skb, info);
+ rtnl_unlock();
+
+ return err;
}
+#endif
/**
* tipc_node_dump - dump TIPC node data
diff --git a/net/tipc/node.h b/net/tipc/node.h
index c39cd861c07d..a6803b449a2c 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -75,6 +75,15 @@ enum {
void tipc_node_stop(struct net *net);
bool tipc_node_get_id(struct net *net, u32 addr, u8 *id);
u32 tipc_node_get_addr(struct tipc_node *node);
+char *tipc_node_get_id_str(struct tipc_node *node);
+void tipc_node_put(struct tipc_node *node);
+struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
+ u16 capabilities, u32 hash_mixes,
+ bool preliminary);
+#ifdef CONFIG_TIPC_CRYPTO
+struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n);
+struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos);
+#endif
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr);
void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128,
struct tipc_bearer *bearer,
@@ -110,5 +119,9 @@ int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
struct netlink_callback *cb);
+#ifdef CONFIG_TIPC_CRYPTO
+int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info);
+#endif
void tipc_node_pre_cleanup_net(struct net *exit_net);
#endif
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
index 6159d327db76..58ab3d6dcdce 100644
--- a/net/tipc/sysctl.c
+++ b/net/tipc/sysctl.c
@@ -35,6 +35,7 @@
#include "core.h"
#include "trace.h"
+#include "crypto.h"
#include <linux/sysctl.h>
@@ -64,6 +65,16 @@ static struct ctl_table tipc_table[] = {
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
+#ifdef CONFIG_TIPC_CRYPTO
+ {
+ .procname = "max_tfms",
+ .data = &sysctl_tipc_max_tfms,
+ .maxlen = sizeof(sysctl_tipc_max_tfms),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ },
+#endif
{}
};
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 43ca5fd6574d..86aaa4d3e781 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -372,6 +372,7 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
goto out;
if (b && test_bit(0, &b->up)) {
+ TIPC_SKB_CB(skb)->flags = 0;
tipc_rcv(sock_net(sk), skb, b);
return 0;
}
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 33b267b052c0..0683788bbef0 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -534,8 +534,10 @@ last_record:
int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
unsigned char record_type = TLS_RECORD_TYPE_DATA;
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
int rc;
+ mutex_lock(&tls_ctx->tx_lock);
lock_sock(sk);
if (unlikely(msg->msg_controllen)) {
@@ -549,12 +551,14 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
out:
release_sock(sk);
+ mutex_unlock(&tls_ctx->tx_lock);
return rc;
}
int tls_device_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
struct iov_iter msg_iter;
char *kaddr = kmap(page);
struct kvec iov;
@@ -563,6 +567,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
if (flags & MSG_SENDPAGE_NOTLAST)
flags |= MSG_MORE;
+ mutex_lock(&tls_ctx->tx_lock);
lock_sock(sk);
if (flags & MSG_OOB) {
@@ -579,6 +584,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
out:
release_sock(sk);
+ mutex_unlock(&tls_ctx->tx_lock);
return rc;
}
@@ -634,9 +640,11 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
- if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
+ if (tls_is_partially_sent_record(ctx)) {
gfp_t sk_allocation = sk->sk_allocation;
+ WARN_ON_ONCE(sk->sk_write_pending);
+
sk->sk_allocation = GFP_ATOMIC;
tls_push_partial_record(sk, ctx,
MSG_DONTWAIT | MSG_NOSIGNAL |
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index f144b965704e..5bb93dd5762b 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -267,6 +267,7 @@ void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
+ mutex_destroy(&ctx->tx_lock);
if (sk)
kfree_rcu(ctx, rcu);
@@ -627,6 +628,7 @@ struct tls_context *tls_ctx_create(struct sock *sk)
if (!ctx)
return NULL;
+ mutex_init(&ctx->tx_lock);
rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
ctx->sk_proto = sk->sk_prot;
return ctx;
diff --git a/net/tls/tls_proc.c b/net/tls/tls_proc.c
index 83d9c80a684e..3a5dd1e07233 100644
--- a/net/tls/tls_proc.c
+++ b/net/tls/tls_proc.c
@@ -6,6 +6,7 @@
#include <net/snmp.h>
#include <net/tls.h>
+#ifdef CONFIG_PROC_FS
static const struct snmp_mib tls_mib_list[] = {
SNMP_MIB_ITEM("TlsCurrTxSw", LINUX_MIB_TLSCURRTXSW),
SNMP_MIB_ITEM("TlsCurrRxSw", LINUX_MIB_TLSCURRRXSW),
@@ -32,6 +33,7 @@ static int tls_statistics_seq_show(struct seq_file *seq, void *v)
return 0;
}
+#endif
int __net_init tls_proc_init(struct net *net)
{
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index de7561d4cfa5..141da093ff04 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -902,15 +902,9 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
return -ENOTSUPP;
+ mutex_lock(&tls_ctx->tx_lock);
lock_sock(sk);
- /* Wait till there is any pending write on socket */
- if (unlikely(sk->sk_write_pending)) {
- ret = wait_on_pending_writer(sk, &timeo);
- if (unlikely(ret))
- goto send_end;
- }
-
if (unlikely(msg->msg_controllen)) {
ret = tls_proccess_cmsg(sk, msg, &record_type);
if (ret) {
@@ -1096,6 +1090,7 @@ send_end:
ret = sk_stream_error(sk, msg->msg_flags, ret);
release_sock(sk);
+ mutex_unlock(&tls_ctx->tx_lock);
return copied ? copied : ret;
}
@@ -1119,13 +1114,6 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
- /* Wait till there is any pending write on socket */
- if (unlikely(sk->sk_write_pending)) {
- ret = wait_on_pending_writer(sk, &timeo);
- if (unlikely(ret))
- goto sendpage_end;
- }
-
/* Call the sk_stream functions to manage the sndbuf mem. */
while (size > 0) {
size_t copy, required_size;
@@ -1224,15 +1212,18 @@ sendpage_end:
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
int ret;
if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
return -ENOTSUPP;
+ mutex_lock(&tls_ctx->tx_lock);
lock_sock(sk);
ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
release_sock(sk);
+ mutex_unlock(&tls_ctx->tx_lock);
return ret;
}
@@ -2175,9 +2166,11 @@ static void tx_work_handler(struct work_struct *work)
if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
return;
+ mutex_lock(&tls_ctx->tx_lock);
lock_sock(sk);
tls_tx_records(sk, -1);
release_sock(sk);
+ mutex_unlock(&tls_ctx->tx_lock);
}
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
@@ -2185,12 +2178,9 @@ void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
/* Schedule the transmission if tx list is ready */
- if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
- /* Schedule the transmission */
- if (!test_and_set_bit(BIT_TX_SCHEDULED,
- &tx_ctx->tx_bitmask))
- schedule_delayed_work(&tx_ctx->tx_work.work, 0);
- }
+ if (is_tx_ready(tx_ctx) &&
+ !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
+ schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 582a3e4dfce2..cc8659838bf2 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -126,19 +126,18 @@ static struct proto vsock_proto = {
*/
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
-static const struct vsock_transport *transport;
+#define VSOCK_DEFAULT_BUFFER_SIZE (1024 * 256)
+#define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
+#define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128
+
+/* Transport used for host->guest communication */
+static const struct vsock_transport *transport_h2g;
+/* Transport used for guest->host communication */
+static const struct vsock_transport *transport_g2h;
+/* Transport used for DGRAM communication */
+static const struct vsock_transport *transport_dgram;
static DEFINE_MUTEX(vsock_register_mutex);
-/**** EXPORTS ****/
-
-/* Get the ID of the local context. This is transport dependent. */
-
-int vm_sockets_get_local_cid(void)
-{
- return transport->get_local_cid();
-}
-EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid);
-
/**** UTILS ****/
/* Each bound VSocket is stored in the bind hash table and each connected
@@ -188,7 +187,7 @@ static int vsock_auto_bind(struct vsock_sock *vsk)
return __vsock_bind(sk, &local_addr);
}
-static int __init vsock_init_tables(void)
+static void vsock_init_tables(void)
{
int i;
@@ -197,7 +196,6 @@ static int __init vsock_init_tables(void)
for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
INIT_LIST_HEAD(&vsock_connected_table[i]);
- return 0;
}
static void __vsock_insert_bound(struct list_head *list,
@@ -230,9 +228,15 @@ static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
struct vsock_sock *vsk;
- list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
- if (addr->svm_port == vsk->local_addr.svm_port)
+ list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) {
+ if (vsock_addr_equals_addr(addr, &vsk->local_addr))
+ return sk_vsock(vsk);
+
+ if (addr->svm_port == vsk->local_addr.svm_port &&
+ (vsk->local_addr.svm_cid == VMADDR_CID_ANY ||
+ addr->svm_cid == VMADDR_CID_ANY))
return sk_vsock(vsk);
+ }
return NULL;
}
@@ -382,6 +386,81 @@ void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
+static void vsock_deassign_transport(struct vsock_sock *vsk)
+{
+ if (!vsk->transport)
+ return;
+
+ vsk->transport->destruct(vsk);
+ module_put(vsk->transport->module);
+ vsk->transport = NULL;
+}
+
+/* Assign a transport to a socket and call the .init transport callback.
+ *
+ * Note: for stream socket this must be called when vsk->remote_addr is set
+ * (e.g. during the connect() or when a connection request on a listener
+ * socket is received).
+ * The vsk->remote_addr is used to decide which transport to use:
+ * - remote CID <= VMADDR_CID_HOST will use guest->host transport;
+ * - remote CID == local_cid (guest->host transport) will use guest->host
+ * transport for loopback (host->guest transports don't support loopback);
+ * - remote CID > VMADDR_CID_HOST will use host->guest transport;
+ */
+int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
+{
+ const struct vsock_transport *new_transport;
+ struct sock *sk = sk_vsock(vsk);
+ unsigned int remote_cid = vsk->remote_addr.svm_cid;
+
+ switch (sk->sk_type) {
+ case SOCK_DGRAM:
+ new_transport = transport_dgram;
+ break;
+ case SOCK_STREAM:
+ if (remote_cid <= VMADDR_CID_HOST ||
+ (transport_g2h &&
+ remote_cid == transport_g2h->get_local_cid()))
+ new_transport = transport_g2h;
+ else
+ new_transport = transport_h2g;
+ break;
+ default:
+ return -ESOCKTNOSUPPORT;
+ }
+
+ if (vsk->transport) {
+ if (vsk->transport == new_transport)
+ return 0;
+
+ vsk->transport->release(vsk);
+ vsock_deassign_transport(vsk);
+ }
+
+ /* We increase the module refcnt to prevent the transport from being
+ * unloaded while there are open sockets assigned to it.
+ */
+ if (!new_transport || !try_module_get(new_transport->module))
+ return -ENODEV;
+
+ vsk->transport = new_transport;
+
+ return vsk->transport->init(vsk, psk);
+}
+EXPORT_SYMBOL_GPL(vsock_assign_transport);
+
+bool vsock_find_cid(unsigned int cid)
+{
+ if (transport_g2h && cid == transport_g2h->get_local_cid())
+ return true;
+
+ if (transport_h2g && cid == VMADDR_CID_HOST)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(vsock_find_cid);
+
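[Editor's note] For illustration (not part of the patch): from userspace the selection rules above are driven entirely by the CID passed to connect(). A minimal sketch, assuming a guest with a g2h transport loaded:

	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/vm_sockets.h>

	static int vsock_connect_to_host(unsigned int port)
	{
		struct sockaddr_vm addr;
		int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

		if (fd < 0)
			return -1;

		memset(&addr, 0, sizeof(addr));
		addr.svm_family = AF_VSOCK;
		addr.svm_cid = VMADDR_CID_HOST;	/* CID 2 -> guest->host transport */
		addr.svm_port = port;

		/* vsock_assign_transport() runs inside this connect() */
		if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}

A remote CID above VMADDR_CID_HOST would instead select the h2g transport, per the comment block above.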
static struct sock *vsock_dequeue_accept(struct sock *listener)
{
struct vsock_sock *vlistener;
@@ -418,7 +497,12 @@ static bool vsock_is_pending(struct sock *sk)
static int vsock_send_shutdown(struct sock *sk, int mode)
{
- return transport->shutdown(vsock_sk(sk), mode);
+ struct vsock_sock *vsk = vsock_sk(sk);
+
+ if (!vsk->transport)
+ return -ENODEV;
+
+ return vsk->transport->shutdown(vsk, mode);
}
static void vsock_pending_work(struct work_struct *work)
@@ -439,7 +523,7 @@ static void vsock_pending_work(struct work_struct *work)
if (vsock_is_pending(sk)) {
vsock_remove_pending(listener, sk);
- listener->sk_ack_backlog--;
+ sk_acceptq_removed(listener);
} else if (!vsk->rejected) {
/* We are not on the pending list and accept() did not reject
* us, so we must have been accepted by our user process. We
@@ -528,13 +612,12 @@ static int __vsock_bind_stream(struct vsock_sock *vsk,
static int __vsock_bind_dgram(struct vsock_sock *vsk,
struct sockaddr_vm *addr)
{
- return transport->dgram_bind(vsk, addr);
+ return vsk->transport->dgram_bind(vsk, addr);
}
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
struct vsock_sock *vsk = vsock_sk(sk);
- u32 cid;
int retval;
/* First ensure this socket isn't already bound. */
@@ -544,10 +627,9 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
/* Now bind to the provided address or select appropriate values if
* none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that
* like AF_INET prevents binding to a non-local IP address (in most
- * cases), we only allow binding to the local CID.
+ * cases), we only allow binding to a local CID.
*/
- cid = transport->get_local_cid();
- if (addr->svm_cid != cid && addr->svm_cid != VMADDR_CID_ANY)
+ if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid))
return -EADDRNOTAVAIL;
switch (sk->sk_socket->type) {
@@ -571,12 +653,12 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
static void vsock_connect_timeout(struct work_struct *work);
-struct sock *__vsock_create(struct net *net,
- struct socket *sock,
- struct sock *parent,
- gfp_t priority,
- unsigned short type,
- int kern)
+static struct sock *__vsock_create(struct net *net,
+ struct socket *sock,
+ struct sock *parent,
+ gfp_t priority,
+ unsigned short type,
+ int kern)
{
struct sock *sk;
struct vsock_sock *psk;
@@ -620,28 +702,24 @@ struct sock *__vsock_create(struct net *net,
vsk->trusted = psk->trusted;
vsk->owner = get_cred(psk->owner);
vsk->connect_timeout = psk->connect_timeout;
+ vsk->buffer_size = psk->buffer_size;
+ vsk->buffer_min_size = psk->buffer_min_size;
+ vsk->buffer_max_size = psk->buffer_max_size;
} else {
vsk->trusted = capable(CAP_NET_ADMIN);
vsk->owner = get_current_cred();
vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
+ vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE;
+ vsk->buffer_min_size = VSOCK_DEFAULT_BUFFER_MIN_SIZE;
+ vsk->buffer_max_size = VSOCK_DEFAULT_BUFFER_MAX_SIZE;
}
- if (transport->init(vsk, psk) < 0) {
- sk_free(sk);
- return NULL;
- }
-
- if (sock)
- vsock_insert_unbound(vsk);
-
return sk;
}
-EXPORT_SYMBOL_GPL(__vsock_create);
static void __vsock_release(struct sock *sk, int level)
{
if (sk) {
- struct sk_buff *skb;
struct sock *pending;
struct vsock_sock *vsk;
@@ -651,7 +729,10 @@ static void __vsock_release(struct sock *sk, int level)
/* The release call is supposed to use lock_sock_nested()
* rather than lock_sock(), if a sock lock should be acquired.
*/
- transport->release(vsk);
+ if (vsk->transport)
+ vsk->transport->release(vsk);
+ else if (sk->sk_type == SOCK_STREAM)
+ vsock_remove_sock(vsk);
/* When "level" is SINGLE_DEPTH_NESTING, use the nested
* version to avoid the warning "possible recursive locking
@@ -662,8 +743,7 @@ static void __vsock_release(struct sock *sk, int level)
sock_orphan(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
- while ((skb = skb_dequeue(&sk->sk_receive_queue)))
- kfree_skb(skb);
+ skb_queue_purge(&sk->sk_receive_queue);
/* Clean up any sockets that never were accepted. */
while ((pending = vsock_dequeue_accept(sk)) != NULL) {
@@ -680,7 +760,7 @@ static void vsock_sk_destruct(struct sock *sk)
{
struct vsock_sock *vsk = vsock_sk(sk);
- transport->destruct(vsk);
+ vsock_deassign_transport(vsk);
/* When clearing these addresses, there's no need to set the family and
* possibly register the address family with the kernel.
@@ -702,15 +782,22 @@ static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return err;
}
+struct sock *vsock_create_connected(struct sock *parent)
+{
+ return __vsock_create(sock_net(parent), NULL, parent, GFP_KERNEL,
+ parent->sk_type, 0);
+}
+EXPORT_SYMBOL_GPL(vsock_create_connected);
+
s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
- return transport->stream_has_data(vsk);
+ return vsk->transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);
s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
- return transport->stream_has_space(vsk);
+ return vsk->transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);
@@ -879,6 +966,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
} else if (sock->type == SOCK_STREAM) {
+ const struct vsock_transport *transport = vsk->transport;
lock_sock(sk);
/* Listening sockets that have connections in their accept
@@ -889,7 +977,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
mask |= EPOLLIN | EPOLLRDNORM;
/* If there is something in the queue then we can read. */
- if (transport->stream_is_active(vsk) &&
+ if (transport && transport->stream_is_active(vsk) &&
!(sk->sk_shutdown & RCV_SHUTDOWN)) {
bool data_ready_now = false;
int ret = transport->notify_poll_in(
@@ -954,6 +1042,7 @@ static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
struct sock *sk;
struct vsock_sock *vsk;
struct sockaddr_vm *remote_addr;
+ const struct vsock_transport *transport;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
@@ -962,6 +1051,7 @@ static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
+ transport = vsk->transport;
lock_sock(sk);
@@ -1046,8 +1136,8 @@ static int vsock_dgram_connect(struct socket *sock,
if (err)
goto out;
- if (!transport->dgram_allow(remote_addr->svm_cid,
- remote_addr->svm_port)) {
+ if (!vsk->transport->dgram_allow(remote_addr->svm_cid,
+ remote_addr->svm_port)) {
err = -EINVAL;
goto out;
}
@@ -1063,7 +1153,9 @@ out:
static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
- return transport->dgram_dequeue(vsock_sk(sock->sk), msg, len, flags);
+ struct vsock_sock *vsk = vsock_sk(sock->sk);
+
+ return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
}
static const struct proto_ops vsock_dgram_ops = {
@@ -1089,6 +1181,8 @@ static const struct proto_ops vsock_dgram_ops = {
static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
{
+ const struct vsock_transport *transport = vsk->transport;
+
if (!transport->cancel_pkt)
return -EOPNOTSUPP;
@@ -1125,6 +1219,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
int err;
struct sock *sk;
struct vsock_sock *vsk;
+ const struct vsock_transport *transport;
struct sockaddr_vm *remote_addr;
long timeout;
DEFINE_WAIT(wait);
@@ -1159,19 +1254,26 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
goto out;
}
+ /* Set the remote address that we are connecting to. */
+ memcpy(&vsk->remote_addr, remote_addr,
+ sizeof(vsk->remote_addr));
+
+ err = vsock_assign_transport(vsk, NULL);
+ if (err)
+ goto out;
+
+ transport = vsk->transport;
+
/* The hypervisor and well-known contexts do not have socket
* endpoints.
*/
- if (!transport->stream_allow(remote_addr->svm_cid,
+ if (!transport ||
+ !transport->stream_allow(remote_addr->svm_cid,
remote_addr->svm_port)) {
err = -ENETUNREACH;
goto out;
}
- /* Set the remote address that we are connecting to. */
- memcpy(&vsk->remote_addr, remote_addr,
- sizeof(vsk->remote_addr));
-
err = vsock_auto_bind(vsk);
if (err)
goto out;
@@ -1301,7 +1403,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
err = -listener->sk_err;
if (connected) {
- listener->sk_ack_backlog--;
+ sk_acceptq_removed(listener);
lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
vconnected = vsock_sk(connected);
@@ -1366,6 +1468,23 @@ out:
return err;
}
+static void vsock_update_buffer_size(struct vsock_sock *vsk,
+ const struct vsock_transport *transport,
+ u64 val)
+{
+ if (val > vsk->buffer_max_size)
+ val = vsk->buffer_max_size;
+
+ if (val < vsk->buffer_min_size)
+ val = vsk->buffer_min_size;
+
+ if (val != vsk->buffer_size &&
+ transport && transport->notify_buffer_size)
+ transport->notify_buffer_size(vsk, &val);
+
+ vsk->buffer_size = val;
+}
+
static int vsock_stream_setsockopt(struct socket *sock,
int level,
int optname,
@@ -1375,6 +1494,7 @@ static int vsock_stream_setsockopt(struct socket *sock,
int err;
struct sock *sk;
struct vsock_sock *vsk;
+ const struct vsock_transport *transport;
u64 val;
if (level != AF_VSOCK)
@@ -1395,23 +1515,26 @@ static int vsock_stream_setsockopt(struct socket *sock,
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
+ transport = vsk->transport;
lock_sock(sk);
switch (optname) {
case SO_VM_SOCKETS_BUFFER_SIZE:
COPY_IN(val);
- transport->set_buffer_size(vsk, val);
+ vsock_update_buffer_size(vsk, transport, val);
break;
case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
COPY_IN(val);
- transport->set_max_buffer_size(vsk, val);
+ vsk->buffer_max_size = val;
+ vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
break;
case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
COPY_IN(val);
- transport->set_min_buffer_size(vsk, val);
+ vsk->buffer_min_size = val;
+ vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
break;
case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
@@ -1478,17 +1601,17 @@ static int vsock_stream_getsockopt(struct socket *sock,
switch (optname) {
case SO_VM_SOCKETS_BUFFER_SIZE:
- val = transport->get_buffer_size(vsk);
+ val = vsk->buffer_size;
COPY_OUT(val);
break;
case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
- val = transport->get_max_buffer_size(vsk);
+ val = vsk->buffer_max_size;
COPY_OUT(val);
break;
case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
- val = transport->get_min_buffer_size(vsk);
+ val = vsk->buffer_min_size;
COPY_OUT(val);
break;
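[Editor's note] For illustration (not part of the patch): since all three values now live in struct vsock_sock, the option triple behaves the same on every transport. A minimal userspace sketch; note the 64-bit optval and the clamping to [min, max] performed by vsock_update_buffer_size():

	#include <stdio.h>
	#include <sys/socket.h>
	#include <linux/vm_sockets.h>

	static int set_vsock_buffer(int fd, unsigned long long bytes)
	{
		unsigned long long val = bytes;
		socklen_t len = sizeof(val);

		/* Clamped to [buffer_min_size, buffer_max_size] by the core;
		 * the transport only gets a notify_buffer_size() callback.
		 */
		if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
			       &val, sizeof(val)) < 0)
			return -1;

		/* Read back the (possibly clamped) value */
		if (getsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
			       &val, &len) < 0)
			return -1;

		printf("effective buffer size: %llu\n", val);
		return 0;
	}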
@@ -1519,6 +1642,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk;
struct vsock_sock *vsk;
+ const struct vsock_transport *transport;
ssize_t total_written;
long timeout;
int err;
@@ -1527,6 +1651,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
sk = sock->sk;
vsk = vsock_sk(sk);
+ transport = vsk->transport;
total_written = 0;
err = 0;
@@ -1548,7 +1673,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
goto out;
}
- if (sk->sk_state != TCP_ESTABLISHED ||
+ if (!transport || sk->sk_state != TCP_ESTABLISHED ||
!vsock_addr_bound(&vsk->local_addr)) {
err = -ENOTCONN;
goto out;
@@ -1658,6 +1783,7 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
{
struct sock *sk;
struct vsock_sock *vsk;
+ const struct vsock_transport *transport;
int err;
size_t target;
ssize_t copied;
@@ -1668,11 +1794,12 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
sk = sock->sk;
vsk = vsock_sk(sk);
+ transport = vsk->transport;
err = 0;
lock_sock(sk);
- if (sk->sk_state != TCP_ESTABLISHED) {
+ if (!transport || sk->sk_state != TCP_ESTABLISHED) {
/* Recvmsg is supposed to return 0 if a peer performs an
* orderly shutdown. Differentiate between that case and when a
* peer has not connected or a local shutdown occurred with the
@@ -1846,6 +1973,10 @@ static const struct proto_ops vsock_stream_ops = {
static int vsock_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
+ struct vsock_sock *vsk;
+ struct sock *sk;
+ int ret;
+
if (!sock)
return -EINVAL;
@@ -1865,7 +1996,23 @@ static int vsock_create(struct net *net, struct socket *sock,
sock->state = SS_UNCONNECTED;
- return __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern) ? 0 : -ENOMEM;
+ sk = __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern);
+ if (!sk)
+ return -ENOMEM;
+
+ vsk = vsock_sk(sk);
+
+ if (sock->type == SOCK_DGRAM) {
+ ret = vsock_assign_transport(vsk, NULL);
+ if (ret < 0) {
+ sock_put(sk);
+ return ret;
+ }
+ }
+
+ vsock_insert_unbound(vsk);
+
+ return 0;
}
static const struct net_proto_family vsock_family_ops = {
@@ -1878,11 +2025,20 @@ static long vsock_dev_do_ioctl(struct file *filp,
unsigned int cmd, void __user *ptr)
{
u32 __user *p = ptr;
+ u32 cid = VMADDR_CID_ANY;
int retval = 0;
switch (cmd) {
case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
- if (put_user(transport->get_local_cid(), p) != 0)
+ /* To be compatible with the VMCI behavior, we prioritize the
+ * guest CID instead of the well-known host CID (VMADDR_CID_HOST).
+ */
+ if (transport_g2h)
+ cid = transport_g2h->get_local_cid();
+ else if (transport_h2g)
+ cid = transport_h2g->get_local_cid();
+
+ if (put_user(cid, p) != 0)
retval = -EFAULT;
break;
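[Editor's note] For illustration (not part of the patch), the matching userspace call; with both roles registered, the guest CID wins, as the comment above explains:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/vm_sockets.h>

	int main(void)
	{
		unsigned int cid;
		int fd = open("/dev/vsock", O_RDONLY);

		if (fd < 0)
			return 1;

		if (ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid) < 0) {
			close(fd);
			return 1;
		}

		/* VMADDR_CID_ANY (-1U) means no transport is registered yet */
		printf("local CID: %u\n", cid);
		close(fd);
		return 0;
	}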
@@ -1922,24 +2078,13 @@ static struct miscdevice vsock_device = {
.fops = &vsock_device_ops,
};
-int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
+static int __init vsock_init(void)
{
- int err = mutex_lock_interruptible(&vsock_register_mutex);
-
- if (err)
- return err;
-
- if (transport) {
- err = -EBUSY;
- goto err_busy;
- }
+ int err = 0;
- /* Transport must be the owner of the protocol so that it can't
- * unload while there are open sockets.
- */
- vsock_proto.owner = owner;
- transport = t;
+ vsock_init_tables();
+ vsock_proto.owner = THIS_MODULE;
vsock_device.minor = MISC_DYNAMIC_MINOR;
err = misc_register(&vsock_device);
if (err) {
@@ -1960,7 +2105,6 @@ int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
goto err_unregister_proto;
}
- mutex_unlock(&vsock_register_mutex);
return 0;
err_unregister_proto:
@@ -1968,44 +2112,86 @@ err_unregister_proto:
err_deregister_misc:
misc_deregister(&vsock_device);
err_reset_transport:
- transport = NULL;
-err_busy:
- mutex_unlock(&vsock_register_mutex);
return err;
}
-EXPORT_SYMBOL_GPL(__vsock_core_init);
-void vsock_core_exit(void)
+static void __exit vsock_exit(void)
{
- mutex_lock(&vsock_register_mutex);
-
misc_deregister(&vsock_device);
sock_unregister(AF_VSOCK);
proto_unregister(&vsock_proto);
-
- /* We do not want the assignment below re-ordered. */
- mb();
- transport = NULL;
-
- mutex_unlock(&vsock_register_mutex);
}
-EXPORT_SYMBOL_GPL(vsock_core_exit);
-const struct vsock_transport *vsock_core_get_transport(void)
+const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk)
{
- /* vsock_register_mutex not taken since only the transport uses this
- * function and only while registered.
- */
- return transport;
+ return vsk->transport;
}
EXPORT_SYMBOL_GPL(vsock_core_get_transport);
-static void __exit vsock_exit(void)
+int vsock_core_register(const struct vsock_transport *t, int features)
{
- /* Do nothing. This function makes this module removable. */
+ const struct vsock_transport *t_h2g, *t_g2h, *t_dgram;
+ int err = mutex_lock_interruptible(&vsock_register_mutex);
+
+ if (err)
+ return err;
+
+ t_h2g = transport_h2g;
+ t_g2h = transport_g2h;
+ t_dgram = transport_dgram;
+
+ if (features & VSOCK_TRANSPORT_F_H2G) {
+ if (t_h2g) {
+ err = -EBUSY;
+ goto err_busy;
+ }
+ t_h2g = t;
+ }
+
+ if (features & VSOCK_TRANSPORT_F_G2H) {
+ if (t_g2h) {
+ err = -EBUSY;
+ goto err_busy;
+ }
+ t_g2h = t;
+ }
+
+ if (features & VSOCK_TRANSPORT_F_DGRAM) {
+ if (t_dgram) {
+ err = -EBUSY;
+ goto err_busy;
+ }
+ t_dgram = t;
+ }
+
+ transport_h2g = t_h2g;
+ transport_g2h = t_g2h;
+ transport_dgram = t_dgram;
+
+err_busy:
+ mutex_unlock(&vsock_register_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vsock_core_register);
+
+void vsock_core_unregister(const struct vsock_transport *t)
+{
+ mutex_lock(&vsock_register_mutex);
+
+ if (transport_h2g == t)
+ transport_h2g = NULL;
+
+ if (transport_g2h == t)
+ transport_g2h = NULL;
+
+ if (transport_dgram == t)
+ transport_dgram = NULL;
+
+ mutex_unlock(&vsock_register_mutex);
}
+EXPORT_SYMBOL_GPL(vsock_core_unregister);
-module_init(vsock_init_tables);
+module_init(vsock_init);
module_exit(vsock_exit);
MODULE_AUTHOR("VMware, Inc.");
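[Editor's note] To make the new registration API concrete, here is a hedged skeleton of a hypothetical transport module (all my_* names invented); it claims one or more roles at load time and releases them on unload:

	static struct vsock_transport my_transport = {
		.module = THIS_MODULE,
		/* .init, .destruct, .release, .get_local_cid, ... */
	};

	static int __init my_transport_init(void)
	{
		/* returns -EBUSY if another transport already owns a role */
		return vsock_core_register(&my_transport,
					   VSOCK_TRANSPORT_F_H2G |
					   VSOCK_TRANSPORT_F_DGRAM);
	}

	static void __exit my_transport_exit(void)
	{
		vsock_core_unregister(&my_transport);
	}

	module_init(my_transport_init);
	module_exit(my_transport_exit);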
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index bef8772116ec..3c7d07a99fc5 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -165,6 +165,8 @@ static const guid_t srv_id_template =
GUID_INIT(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58,
0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3);
+static bool hvs_check_transport(struct vsock_sock *vsk);
+
static bool is_valid_srv_id(const guid_t *id)
{
return !memcmp(&id->b[4], &srv_id_template.b[4], sizeof(guid_t) - 4);
@@ -188,7 +190,8 @@ static void hvs_remote_addr_init(struct sockaddr_vm *remote,
static u32 host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
struct sock *sk;
- vsock_addr_init(remote, VMADDR_CID_ANY, VMADDR_PORT_ANY);
+ /* Remote peer is always the host */
+ vsock_addr_init(remote, VMADDR_CID_HOST, VMADDR_PORT_ANY);
while (1) {
/* Wrap around? */
@@ -360,13 +363,24 @@ static void hvs_open_connection(struct vmbus_channel *chan)
if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog)
goto out;
- new = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
- sk->sk_type, 0);
+ new = vsock_create_connected(sk);
if (!new)
goto out;
new->sk_state = TCP_SYN_SENT;
vnew = vsock_sk(new);
+
+ hvs_addr_init(&vnew->local_addr, if_type);
+ hvs_remote_addr_init(&vnew->remote_addr, &vnew->local_addr);
+
+ ret = vsock_assign_transport(vnew, vsock_sk(sk));
+ /* The transport assigned (based on remote_addr) must be the
+ * same one on which we received the request.
+ */
+ if (ret || !hvs_check_transport(vnew)) {
+ sock_put(new);
+ goto out;
+ }
hvs_new = vnew->trans;
hvs_new->chan = chan;
} else {
@@ -428,10 +442,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
if (conn_from_host) {
new->sk_state = TCP_ESTABLISHED;
- sk->sk_ack_backlog++;
-
- hvs_addr_init(&vnew->local_addr, if_type);
- hvs_remote_addr_init(&vnew->remote_addr, &vnew->local_addr);
+ sk_acceptq_added(sk);
hvs_new->vm_srv_id = *if_type;
hvs_new->host_srv_id = *if_instance;
@@ -845,37 +856,9 @@ int hvs_notify_send_post_enqueue(struct vsock_sock *vsk, ssize_t written,
return 0;
}
-static void hvs_set_buffer_size(struct vsock_sock *vsk, u64 val)
-{
- /* Ignored. */
-}
-
-static void hvs_set_min_buffer_size(struct vsock_sock *vsk, u64 val)
-{
- /* Ignored. */
-}
-
-static void hvs_set_max_buffer_size(struct vsock_sock *vsk, u64 val)
-{
- /* Ignored. */
-}
-
-static u64 hvs_get_buffer_size(struct vsock_sock *vsk)
-{
- return -ENOPROTOOPT;
-}
-
-static u64 hvs_get_min_buffer_size(struct vsock_sock *vsk)
-{
- return -ENOPROTOOPT;
-}
-
-static u64 hvs_get_max_buffer_size(struct vsock_sock *vsk)
-{
- return -ENOPROTOOPT;
-}
-
static struct vsock_transport hvs_transport = {
+ .module = THIS_MODULE,
+
.get_local_cid = hvs_get_local_cid,
.init = hvs_sock_init,
@@ -908,14 +891,13 @@ static struct vsock_transport hvs_transport = {
.notify_send_pre_enqueue = hvs_notify_send_pre_enqueue,
.notify_send_post_enqueue = hvs_notify_send_post_enqueue,
- .set_buffer_size = hvs_set_buffer_size,
- .set_min_buffer_size = hvs_set_min_buffer_size,
- .set_max_buffer_size = hvs_set_max_buffer_size,
- .get_buffer_size = hvs_get_buffer_size,
- .get_min_buffer_size = hvs_get_min_buffer_size,
- .get_max_buffer_size = hvs_get_max_buffer_size,
};
+static bool hvs_check_transport(struct vsock_sock *vsk)
+{
+ return vsk->transport == &hvs_transport;
+}
+
static int hvs_probe(struct hv_device *hdev,
const struct hv_vmbus_device_id *dev_id)
{
@@ -964,7 +946,7 @@ static int __init hvs_init(void)
if (ret != 0)
return ret;
- ret = vsock_core_init(&hvs_transport);
+ ret = vsock_core_register(&hvs_transport, VSOCK_TRANSPORT_F_G2H);
if (ret) {
vmbus_driver_unregister(&hvs_drv);
return ret;
@@ -975,7 +957,7 @@ static int __init hvs_init(void)
static void __exit hvs_exit(void)
{
- vsock_core_exit();
+ vsock_core_unregister(&hvs_transport);
vmbus_driver_unregister(&hvs_drv);
}
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 082a30936690..1458c5c8b64d 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -86,33 +86,6 @@ out_rcu:
return ret;
}
-static void virtio_transport_loopback_work(struct work_struct *work)
-{
- struct virtio_vsock *vsock =
- container_of(work, struct virtio_vsock, loopback_work);
- LIST_HEAD(pkts);
-
- spin_lock_bh(&vsock->loopback_list_lock);
- list_splice_init(&vsock->loopback_list, &pkts);
- spin_unlock_bh(&vsock->loopback_list_lock);
-
- mutex_lock(&vsock->rx_lock);
-
- if (!vsock->rx_run)
- goto out;
-
- while (!list_empty(&pkts)) {
- struct virtio_vsock_pkt *pkt;
-
- pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
- list_del_init(&pkt->list);
-
- virtio_transport_recv_pkt(pkt);
- }
-out:
- mutex_unlock(&vsock->rx_lock);
-}
-
static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
struct virtio_vsock_pkt *pkt)
{
@@ -370,59 +343,6 @@ static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
return val < virtqueue_get_vring_size(vq);
}
-static void virtio_transport_rx_work(struct work_struct *work)
-{
- struct virtio_vsock *vsock =
- container_of(work, struct virtio_vsock, rx_work);
- struct virtqueue *vq;
-
- vq = vsock->vqs[VSOCK_VQ_RX];
-
- mutex_lock(&vsock->rx_lock);
-
- if (!vsock->rx_run)
- goto out;
-
- do {
- virtqueue_disable_cb(vq);
- for (;;) {
- struct virtio_vsock_pkt *pkt;
- unsigned int len;
-
- if (!virtio_transport_more_replies(vsock)) {
- /* Stop rx until the device processes already
- * pending replies. Leave rx virtqueue
- * callbacks disabled.
- */
- goto out;
- }
-
- pkt = virtqueue_get_buf(vq, &len);
- if (!pkt) {
- break;
- }
-
- vsock->rx_buf_nr--;
-
- /* Drop short/long packets */
- if (unlikely(len < sizeof(pkt->hdr) ||
- len > sizeof(pkt->hdr) + pkt->len)) {
- virtio_transport_free_pkt(pkt);
- continue;
- }
-
- pkt->len = len - sizeof(pkt->hdr);
- virtio_transport_deliver_tap_pkt(pkt);
- virtio_transport_recv_pkt(pkt);
- }
- } while (!virtqueue_enable_cb(vq));
-
-out:
- if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
- virtio_vsock_rx_fill(vsock);
- mutex_unlock(&vsock->rx_lock);
-}
-
/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
struct virtio_vsock_event *event)
@@ -542,6 +462,8 @@ static void virtio_vsock_rx_done(struct virtqueue *vq)
static struct virtio_transport virtio_transport = {
.transport = {
+ .module = THIS_MODULE,
+
.get_local_cid = virtio_transport_get_local_cid,
.init = virtio_transport_do_socket_init,
@@ -574,18 +496,92 @@ static struct virtio_transport virtio_transport = {
.notify_send_pre_block = virtio_transport_notify_send_pre_block,
.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
-
- .set_buffer_size = virtio_transport_set_buffer_size,
- .set_min_buffer_size = virtio_transport_set_min_buffer_size,
- .set_max_buffer_size = virtio_transport_set_max_buffer_size,
- .get_buffer_size = virtio_transport_get_buffer_size,
- .get_min_buffer_size = virtio_transport_get_min_buffer_size,
- .get_max_buffer_size = virtio_transport_get_max_buffer_size,
+ .notify_buffer_size = virtio_transport_notify_buffer_size,
},
.send_pkt = virtio_transport_send_pkt,
};
+static void virtio_transport_loopback_work(struct work_struct *work)
+{
+ struct virtio_vsock *vsock =
+ container_of(work, struct virtio_vsock, loopback_work);
+ LIST_HEAD(pkts);
+
+ spin_lock_bh(&vsock->loopback_list_lock);
+ list_splice_init(&vsock->loopback_list, &pkts);
+ spin_unlock_bh(&vsock->loopback_list_lock);
+
+ mutex_lock(&vsock->rx_lock);
+
+ if (!vsock->rx_run)
+ goto out;
+
+ while (!list_empty(&pkts)) {
+ struct virtio_vsock_pkt *pkt;
+
+ pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
+ list_del_init(&pkt->list);
+
+ virtio_transport_recv_pkt(&virtio_transport, pkt);
+ }
+out:
+ mutex_unlock(&vsock->rx_lock);
+}
+
+static void virtio_transport_rx_work(struct work_struct *work)
+{
+ struct virtio_vsock *vsock =
+ container_of(work, struct virtio_vsock, rx_work);
+ struct virtqueue *vq;
+
+ vq = vsock->vqs[VSOCK_VQ_RX];
+
+ mutex_lock(&vsock->rx_lock);
+
+ if (!vsock->rx_run)
+ goto out;
+
+ do {
+ virtqueue_disable_cb(vq);
+ for (;;) {
+ struct virtio_vsock_pkt *pkt;
+ unsigned int len;
+
+ if (!virtio_transport_more_replies(vsock)) {
+ /* Stop rx until the device processes already
+ * pending replies. Leave rx virtqueue
+ * callbacks disabled.
+ */
+ goto out;
+ }
+
+ pkt = virtqueue_get_buf(vq, &len);
+ if (!pkt) {
+ break;
+ }
+
+ vsock->rx_buf_nr--;
+
+ /* Drop short/long packets */
+ if (unlikely(len < sizeof(pkt->hdr) ||
+ len > sizeof(pkt->hdr) + pkt->len)) {
+ virtio_transport_free_pkt(pkt);
+ continue;
+ }
+
+ pkt->len = len - sizeof(pkt->hdr);
+ virtio_transport_deliver_tap_pkt(pkt);
+ virtio_transport_recv_pkt(&virtio_transport, pkt);
+ }
+ } while (!virtqueue_enable_cb(vq));
+
+out:
+ if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
+ virtio_vsock_rx_fill(vsock);
+ mutex_unlock(&vsock->rx_lock);
+}
+
static int virtio_vsock_probe(struct virtio_device *vdev)
{
vq_callback_t *callbacks[] = {
@@ -776,7 +772,8 @@ static int __init virtio_vsock_init(void)
if (!virtio_vsock_workqueue)
return -ENOMEM;
- ret = vsock_core_init(&virtio_transport.transport);
+ ret = vsock_core_register(&virtio_transport.transport,
+ VSOCK_TRANSPORT_F_G2H);
if (ret)
goto out_wq;
@@ -787,7 +784,7 @@ static int __init virtio_vsock_init(void)
return 0;
out_vci:
- vsock_core_exit();
+ vsock_core_unregister(&virtio_transport.transport);
out_wq:
destroy_workqueue(virtio_vsock_workqueue);
return ret;
@@ -796,7 +793,7 @@ out_wq:
static void __exit virtio_vsock_exit(void)
{
unregister_virtio_driver(&virtio_vsock_driver);
- vsock_core_exit();
+ vsock_core_unregister(&virtio_transport.transport);
destroy_workqueue(virtio_vsock_workqueue);
}
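[Editor's note] Moving the workers below the virtio_transport definition lets them pass the transport explicitly. A hedged sketch of how another driver embedding struct virtio_transport would feed its rx path (my_* names invented):

	static void my_handle_rx(struct virtio_vsock_pkt *pkt)
	{
		/* assumes a file-scope: static struct virtio_transport my_transport; */
		virtio_transport_deliver_tap_pkt(pkt);
		/* replies such as RST for unknown sockets now go out
		 * through the transport that received the packet
		 */
		virtio_transport_recv_pkt(&my_transport, pkt);
	}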
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index d02c9b41a768..e5ea29c6bca7 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -29,9 +29,10 @@
/* Threshold for detecting small packets to copy */
#define GOOD_COPY_LEN 128
-static const struct virtio_transport *virtio_transport_get_ops(void)
+static const struct virtio_transport *
+virtio_transport_get_ops(struct vsock_sock *vsk)
{
- const struct vsock_transport *t = vsock_core_get_transport();
+ const struct vsock_transport *t = vsock_core_get_transport(vsk);
return container_of(t, struct virtio_transport, transport);
}
@@ -168,7 +169,7 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
struct virtio_vsock_pkt *pkt;
u32 pkt_len = info->pkt_len;
- src_cid = vm_sockets_get_local_cid();
+ src_cid = virtio_transport_get_ops(vsk)->transport.get_local_cid();
src_port = vsk->local_addr.svm_port;
if (!info->remote_cid) {
dst_cid = vsk->remote_addr.svm_cid;
@@ -201,7 +202,7 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
virtio_transport_inc_tx_pkt(vvs, pkt);
- return virtio_transport_get_ops()->send_pkt(pkt);
+ return virtio_transport_get_ops(vsk)->send_pkt(pkt);
}
static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
@@ -452,20 +453,16 @@ int virtio_transport_do_socket_init(struct vsock_sock *vsk,
vsk->trans = vvs;
vvs->vsk = vsk;
- if (psk) {
+ if (psk && psk->trans) {
struct virtio_vsock_sock *ptrans = psk->trans;
- vvs->buf_size = ptrans->buf_size;
- vvs->buf_size_min = ptrans->buf_size_min;
- vvs->buf_size_max = ptrans->buf_size_max;
vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
- } else {
- vvs->buf_size = VIRTIO_VSOCK_DEFAULT_BUF_SIZE;
- vvs->buf_size_min = VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE;
- vvs->buf_size_max = VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE;
}
- vvs->buf_alloc = vvs->buf_size;
+ if (vsk->buffer_size > VIRTIO_VSOCK_MAX_BUF_SIZE)
+ vsk->buffer_size = VIRTIO_VSOCK_MAX_BUF_SIZE;
+
+ vvs->buf_alloc = vsk->buffer_size;
spin_lock_init(&vvs->rx_lock);
spin_lock_init(&vvs->tx_lock);
@@ -475,71 +472,20 @@ int virtio_transport_do_socket_init(struct vsock_sock *vsk,
}
EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);
-u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk)
-{
- struct virtio_vsock_sock *vvs = vsk->trans;
-
- return vvs->buf_size;
-}
-EXPORT_SYMBOL_GPL(virtio_transport_get_buffer_size);
-
-u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk)
+/* sk_lock held by the caller */
+void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val)
{
struct virtio_vsock_sock *vvs = vsk->trans;
- return vvs->buf_size_min;
-}
-EXPORT_SYMBOL_GPL(virtio_transport_get_min_buffer_size);
+ if (*val > VIRTIO_VSOCK_MAX_BUF_SIZE)
+ *val = VIRTIO_VSOCK_MAX_BUF_SIZE;
-u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk)
-{
- struct virtio_vsock_sock *vvs = vsk->trans;
-
- return vvs->buf_size_max;
-}
-EXPORT_SYMBOL_GPL(virtio_transport_get_max_buffer_size);
-
-void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
-{
- struct virtio_vsock_sock *vvs = vsk->trans;
-
- if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
- val = VIRTIO_VSOCK_MAX_BUF_SIZE;
- if (val < vvs->buf_size_min)
- vvs->buf_size_min = val;
- if (val > vvs->buf_size_max)
- vvs->buf_size_max = val;
- vvs->buf_size = val;
- vvs->buf_alloc = val;
+ vvs->buf_alloc = *val;
virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
NULL);
}
-EXPORT_SYMBOL_GPL(virtio_transport_set_buffer_size);
-
-void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val)
-{
- struct virtio_vsock_sock *vvs = vsk->trans;
-
- if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
- val = VIRTIO_VSOCK_MAX_BUF_SIZE;
- if (val > vvs->buf_size)
- vvs->buf_size = val;
- vvs->buf_size_min = val;
-}
-EXPORT_SYMBOL_GPL(virtio_transport_set_min_buffer_size);
-
-void virtio_transport_set_max_buffer_size(struct vsock_sock *vsk, u64 val)
-{
- struct virtio_vsock_sock *vvs = vsk->trans;
-
- if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
- val = VIRTIO_VSOCK_MAX_BUF_SIZE;
- if (val < vvs->buf_size)
- vvs->buf_size = val;
- vvs->buf_size_max = val;
-}
-EXPORT_SYMBOL_GPL(virtio_transport_set_max_buffer_size);
+EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size);
int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
@@ -631,9 +577,7 @@ EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);
u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
- struct virtio_vsock_sock *vvs = vsk->trans;
-
- return vvs->buf_size;
+ return vsk->buffer_size;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);
@@ -745,9 +689,9 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
/* Normally packets are associated with a socket. There may be no socket if an
* attempt was made to connect to a socket that does not exist.
*/
-static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
+static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
+ struct virtio_vsock_pkt *pkt)
{
- const struct virtio_transport *t;
struct virtio_vsock_pkt *reply;
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RST,
@@ -767,7 +711,6 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
if (!reply)
return -ENOMEM;
- t = virtio_transport_get_ops();
if (!t) {
virtio_transport_free_pkt(reply);
return -ENOTCONN;
@@ -996,9 +939,11 @@ virtio_transport_recv_connected(struct sock *sk,
if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
vsk->peer_shutdown |= SEND_SHUTDOWN;
if (vsk->peer_shutdown == SHUTDOWN_MASK &&
- vsock_stream_has_data(vsk) <= 0) {
- sock_set_flag(sk, SOCK_DONE);
- sk->sk_state = TCP_CLOSING;
+ vsock_stream_has_data(vsk) <= 0 &&
+ !sock_flag(sk, SOCK_DONE)) {
+ (void)virtio_transport_reset(vsk, NULL);
+
+ virtio_transport_do_close(vsk, true);
}
if (le32_to_cpu(pkt->hdr.flags))
sk->sk_state_change(sk);
@@ -1041,13 +986,39 @@ virtio_transport_send_response(struct vsock_sock *vsk,
return virtio_transport_send_pkt_info(vsk, &info);
}
+static bool virtio_transport_space_update(struct sock *sk,
+ struct virtio_vsock_pkt *pkt)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ bool space_available;
+
+ /* Listener sockets are not associated with any transport, so we are
+ * not able to check the state to see whether there is space available
+ * in the remote peer, but since they are only used to receive requests,
+ * we can assume that there is always space available in the other peer.
+ */
+ if (!vvs)
+ return true;
+
+ /* buf_alloc and fwd_cnt are always included in the hdr */
+ spin_lock_bh(&vvs->tx_lock);
+ vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
+ vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
+ space_available = virtio_transport_has_space(vsk);
+ spin_unlock_bh(&vvs->tx_lock);
+ return space_available;
+}
+
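[Editor's note] To see why buf_alloc and fwd_cnt matter, a worked sketch of the sender-side credit computation these fields feed (mirrors the in-tree virtio_transport_get_credit() logic):

	/* in_flight = tx_cnt - peer_fwd_cnt    (sent, not yet freed by peer)
	 * credit    = peer_buf_alloc - in_flight
	 *
	 * e.g. peer_buf_alloc = 262144, tx_cnt = 300000,
	 * peer_fwd_cnt = 200000 -> 100000 bytes in flight,
	 * 162144 bytes may still be sent.
	 */
	static u32 peer_credit(struct virtio_vsock_sock *vvs)
	{
		return vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	}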
/* Handle server socket */
static int
-virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
+virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
+ struct virtio_transport *t)
{
struct vsock_sock *vsk = vsock_sk(sk);
struct vsock_sock *vchild;
struct sock *child;
+ int ret;
if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
virtio_transport_reset(vsk, pkt);
@@ -1059,14 +1030,13 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
return -ENOMEM;
}
- child = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
- sk->sk_type, 0);
+ child = vsock_create_connected(sk);
if (!child) {
virtio_transport_reset(vsk, pkt);
return -ENOMEM;
}
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
lock_sock_nested(child, SINGLE_DEPTH_NESTING);
@@ -1078,6 +1048,20 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
le32_to_cpu(pkt->hdr.src_port));
+ ret = vsock_assign_transport(vchild, vsk);
+ /* The transport assigned (based on remote_addr) must be the same
+ * one on which we received the request.
+ */
+ if (ret || vchild->transport != &t->transport) {
+ release_sock(child);
+ virtio_transport_reset(vsk, pkt);
+ sock_put(child);
+ return ret;
+ }
+
+ if (virtio_transport_space_update(child, pkt))
+ child->sk_write_space(child);
+
vsock_insert_connected(vchild);
vsock_enqueue_accept(sk, child);
virtio_transport_send_response(vchild, pkt);
@@ -1088,26 +1072,11 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
return 0;
}
-static bool virtio_transport_space_update(struct sock *sk,
- struct virtio_vsock_pkt *pkt)
-{
- struct vsock_sock *vsk = vsock_sk(sk);
- struct virtio_vsock_sock *vvs = vsk->trans;
- bool space_available;
-
- /* buf_alloc and fwd_cnt is always included in the hdr */
- spin_lock_bh(&vvs->tx_lock);
- vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
- vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
- space_available = virtio_transport_has_space(vsk);
- spin_unlock_bh(&vvs->tx_lock);
- return space_available;
-}
-
/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
* lock.
*/
-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
+void virtio_transport_recv_pkt(struct virtio_transport *t,
+ struct virtio_vsock_pkt *pkt)
{
struct sockaddr_vm src, dst;
struct vsock_sock *vsk;
@@ -1129,7 +1098,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
le32_to_cpu(pkt->hdr.fwd_cnt));
if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
- (void)virtio_transport_reset_no_sock(pkt);
+ (void)virtio_transport_reset_no_sock(t, pkt);
goto free_pkt;
}
@@ -1140,7 +1109,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
if (!sk) {
sk = vsock_find_bound_socket(&dst);
if (!sk) {
- (void)virtio_transport_reset_no_sock(pkt);
+ (void)virtio_transport_reset_no_sock(t, pkt);
goto free_pkt;
}
}
@@ -1159,7 +1128,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
switch (sk->sk_state) {
case TCP_LISTEN:
- virtio_transport_recv_listen(sk, pkt);
+ virtio_transport_recv_listen(sk, pkt, t);
virtio_transport_free_pkt(pkt);
break;
case TCP_SYN_SENT:
@@ -1177,6 +1146,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
virtio_transport_free_pkt(pkt);
break;
}
+
release_sock(sk);
/* Release refcnt obtained when we fetched this socket out of the
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 8c9c4ed90fa7..644d32e43d23 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -57,6 +57,7 @@ static bool vmci_transport_old_proto_override(bool *old_pkt_proto);
static u16 vmci_transport_new_proto_supported_versions(void);
static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto,
bool old_pkt_proto);
+static bool vmci_check_transport(struct vsock_sock *vsk);
struct vmci_transport_recv_pkt_info {
struct work_struct work;
@@ -74,15 +75,6 @@ static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
static int PROTOCOL_OVERRIDE = -1;
-#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN 128
-#define VMCI_TRANSPORT_DEFAULT_QP_SIZE 262144
-#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX 262144
-
-/* The default peer timeout indicates how long we will wait for a peer response
- * to a control message.
- */
-#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
-
/* Helper function to convert from a VMCI error code to a VSock error code. */
static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)
@@ -1013,8 +1005,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
return -ECONNREFUSED;
}
- pending = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
- sk->sk_type, 0);
+ pending = vsock_create_connected(sk);
if (!pending) {
vmci_transport_send_reset(sk, pkt);
return -ENOMEM;
@@ -1027,14 +1018,24 @@ static int vmci_transport_recv_listen(struct sock *sk,
vsock_addr_init(&vpending->remote_addr, pkt->dg.src.context,
pkt->src_port);
+ err = vsock_assign_transport(vpending, vsock_sk(sk));
+ /* The transport assigned (based on remote_addr) must be the same
+ * one on which we received the request.
+ */
+ if (err || !vmci_check_transport(vpending)) {
+ vmci_transport_send_reset(sk, pkt);
+ sock_put(pending);
+ return err;
+ }
+
/* If the proposed size fits within our min/max, accept it. Otherwise
* propose our own size.
*/
- if (pkt->u.size >= vmci_trans(vpending)->queue_pair_min_size &&
- pkt->u.size <= vmci_trans(vpending)->queue_pair_max_size) {
+ if (pkt->u.size >= vpending->buffer_min_size &&
+ pkt->u.size <= vpending->buffer_max_size) {
qp_size = pkt->u.size;
} else {
- qp_size = vmci_trans(vpending)->queue_pair_size;
+ qp_size = vpending->buffer_size;
}
/* Figure out if we are using old or new requests based on the
@@ -1098,12 +1099,12 @@ static int vmci_transport_recv_listen(struct sock *sk,
}
vsock_add_pending(sk, pending);
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
pending->sk_state = TCP_SYN_SENT;
vmci_trans(vpending)->produce_size =
vmci_trans(vpending)->consume_size = qp_size;
- vmci_trans(vpending)->queue_pair_size = qp_size;
+ vpending->buffer_size = qp_size;
vmci_trans(vpending)->notify_ops->process_request(pending);
@@ -1397,8 +1398,8 @@ static int vmci_transport_recv_connecting_client_negotiate(
vsk->ignore_connecting_rst = false;
/* Verify that we're OK with the proposed queue pair size */
- if (pkt->u.size < vmci_trans(vsk)->queue_pair_min_size ||
- pkt->u.size > vmci_trans(vsk)->queue_pair_max_size) {
+ if (pkt->u.size < vsk->buffer_min_size ||
+ pkt->u.size > vsk->buffer_max_size) {
err = -EINVAL;
goto destroy;
}
@@ -1503,8 +1504,7 @@ vmci_transport_recv_connecting_client_invalid(struct sock *sk,
vsk->sent_request = false;
vsk->ignore_connecting_rst = true;
- err = vmci_transport_send_conn_request(
- sk, vmci_trans(vsk)->queue_pair_size);
+ err = vmci_transport_send_conn_request(sk, vsk->buffer_size);
if (err < 0)
err = vmci_transport_error_to_vsock_error(err);
else
@@ -1588,21 +1588,6 @@ static int vmci_transport_socket_init(struct vsock_sock *vsk,
INIT_LIST_HEAD(&vmci_trans(vsk)->elem);
vmci_trans(vsk)->sk = &vsk->sk;
spin_lock_init(&vmci_trans(vsk)->lock);
- if (psk) {
- vmci_trans(vsk)->queue_pair_size =
- vmci_trans(psk)->queue_pair_size;
- vmci_trans(vsk)->queue_pair_min_size =
- vmci_trans(psk)->queue_pair_min_size;
- vmci_trans(vsk)->queue_pair_max_size =
- vmci_trans(psk)->queue_pair_max_size;
- } else {
- vmci_trans(vsk)->queue_pair_size =
- VMCI_TRANSPORT_DEFAULT_QP_SIZE;
- vmci_trans(vsk)->queue_pair_min_size =
- VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN;
- vmci_trans(vsk)->queue_pair_max_size =
- VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX;
- }
return 0;
}
@@ -1818,8 +1803,7 @@ static int vmci_transport_connect(struct vsock_sock *vsk)
if (vmci_transport_old_proto_override(&old_pkt_proto) &&
old_pkt_proto) {
- err = vmci_transport_send_conn_request(
- sk, vmci_trans(vsk)->queue_pair_size);
+ err = vmci_transport_send_conn_request(sk, vsk->buffer_size);
if (err < 0) {
sk->sk_state = TCP_CLOSE;
return err;
@@ -1827,8 +1811,7 @@ static int vmci_transport_connect(struct vsock_sock *vsk)
} else {
int supported_proto_versions =
vmci_transport_new_proto_supported_versions();
- err = vmci_transport_send_conn_request2(
- sk, vmci_trans(vsk)->queue_pair_size,
+ err = vmci_transport_send_conn_request2(sk, vsk->buffer_size,
supported_proto_versions);
if (err < 0) {
sk->sk_state = TCP_CLOSE;
@@ -1881,46 +1864,6 @@ static bool vmci_transport_stream_is_active(struct vsock_sock *vsk)
return !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle);
}
-static u64 vmci_transport_get_buffer_size(struct vsock_sock *vsk)
-{
- return vmci_trans(vsk)->queue_pair_size;
-}
-
-static u64 vmci_transport_get_min_buffer_size(struct vsock_sock *vsk)
-{
- return vmci_trans(vsk)->queue_pair_min_size;
-}
-
-static u64 vmci_transport_get_max_buffer_size(struct vsock_sock *vsk)
-{
- return vmci_trans(vsk)->queue_pair_max_size;
-}
-
-static void vmci_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
-{
- if (val < vmci_trans(vsk)->queue_pair_min_size)
- vmci_trans(vsk)->queue_pair_min_size = val;
- if (val > vmci_trans(vsk)->queue_pair_max_size)
- vmci_trans(vsk)->queue_pair_max_size = val;
- vmci_trans(vsk)->queue_pair_size = val;
-}
-
-static void vmci_transport_set_min_buffer_size(struct vsock_sock *vsk,
- u64 val)
-{
- if (val > vmci_trans(vsk)->queue_pair_size)
- vmci_trans(vsk)->queue_pair_size = val;
- vmci_trans(vsk)->queue_pair_min_size = val;
-}
-
-static void vmci_transport_set_max_buffer_size(struct vsock_sock *vsk,
- u64 val)
-{
- if (val < vmci_trans(vsk)->queue_pair_size)
- vmci_trans(vsk)->queue_pair_size = val;
- vmci_trans(vsk)->queue_pair_max_size = val;
-}
-
static int vmci_transport_notify_poll_in(
struct vsock_sock *vsk,
size_t target,
@@ -2076,7 +2019,8 @@ static u32 vmci_transport_get_local_cid(void)
return vmci_get_context_id();
}
-static const struct vsock_transport vmci_transport = {
+static struct vsock_transport vmci_transport = {
+ .module = THIS_MODULE,
.init = vmci_transport_socket_init,
.destruct = vmci_transport_destruct,
.release = vmci_transport_release,
@@ -2103,15 +2047,26 @@ static const struct vsock_transport vmci_transport = {
.notify_send_pre_enqueue = vmci_transport_notify_send_pre_enqueue,
.notify_send_post_enqueue = vmci_transport_notify_send_post_enqueue,
.shutdown = vmci_transport_shutdown,
- .set_buffer_size = vmci_transport_set_buffer_size,
- .set_min_buffer_size = vmci_transport_set_min_buffer_size,
- .set_max_buffer_size = vmci_transport_set_max_buffer_size,
- .get_buffer_size = vmci_transport_get_buffer_size,
- .get_min_buffer_size = vmci_transport_get_min_buffer_size,
- .get_max_buffer_size = vmci_transport_get_max_buffer_size,
.get_local_cid = vmci_transport_get_local_cid,
};
+static bool vmci_check_transport(struct vsock_sock *vsk)
+{
+ return vsk->transport == &vmci_transport;
+}
+
+void vmci_vsock_transport_cb(bool is_host)
+{
+ int features;
+
+ if (is_host)
+ features = VSOCK_TRANSPORT_F_H2G;
+ else
+ features = VSOCK_TRANSPORT_F_G2H;
+
+ vsock_core_register(&vmci_transport, features);
+}
+
static int __init vmci_transport_init(void)
{
int err;
@@ -2128,7 +2083,6 @@ static int __init vmci_transport_init(void)
pr_err("Unable to create datagram handle. (%d)\n", err);
return vmci_transport_error_to_vsock_error(err);
}
-
err = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
vmci_transport_qp_resumed_cb,
NULL, &vmci_transport_qp_resumed_sub_id);
@@ -2139,12 +2093,21 @@ static int __init vmci_transport_init(void)
goto err_destroy_stream_handle;
}
- err = vsock_core_init(&vmci_transport);
+ /* Register only with the DGRAM feature; the other features (H2G, G2H)
+ * will be registered when the first host or guest becomes active.
+ */
+ err = vsock_core_register(&vmci_transport, VSOCK_TRANSPORT_F_DGRAM);
if (err < 0)
goto err_unsubscribe;
+ err = vmci_register_vsock_callback(vmci_vsock_transport_cb);
+ if (err < 0)
+ goto err_unregister;
+
return 0;
+err_unregister:
+ vsock_core_unregister(&vmci_transport);
err_unsubscribe:
vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
err_destroy_stream_handle:
@@ -2170,7 +2133,8 @@ static void __exit vmci_transport_exit(void)
vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
}
- vsock_core_exit();
+ vmci_register_vsock_callback(NULL);
+ vsock_core_unregister(&vmci_transport);
}
module_exit(vmci_transport_exit);
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
index 1ca1e8640b31..b7b072194282 100644
--- a/net/vmw_vsock/vmci_transport.h
+++ b/net/vmw_vsock/vmci_transport.h
@@ -108,9 +108,6 @@ struct vmci_transport {
struct vmci_qp *qpair;
u64 produce_size;
u64 consume_size;
- u64 queue_pair_size;
- u64 queue_pair_min_size;
- u64 queue_pair_max_size;
u32 detach_sub_id;
union vmci_transport_notify notify;
const struct vmci_transport_notify_ops *notify_ops;
diff --git a/net/vmw_vsock/vmci_transport_notify.h b/net/vmw_vsock/vmci_transport_notify.h
index 7843f08d4290..a1aa5a998c0e 100644
--- a/net/vmw_vsock/vmci_transport_notify.h
+++ b/net/vmw_vsock/vmci_transport_notify.h
@@ -11,7 +11,6 @@
#include <linux/types.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
-#include <linux/vm_sockets.h>
#include "vmci_transport.h"
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7186cb653c75..da5262b2298b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -624,6 +624,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
.len = SAE_PASSWORD_MAX_LEN },
[NL80211_ATTR_TWT_RESPONDER] = { .type = NLA_FLAG },
[NL80211_ATTR_HE_OBSS_PD] = NLA_POLICY_NESTED(he_obss_pd_policy),
+ [NL80211_ATTR_VLAN_ID] = NLA_POLICY_RANGE(NLA_U16, 1, VLAN_N_VID - 2),
};
/* policy for the key attributes */
@@ -3940,6 +3941,10 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
key.type != NL80211_KEYTYPE_GROUP)
return -EINVAL;
+ if (key.type == NL80211_KEYTYPE_GROUP &&
+ info->attrs[NL80211_ATTR_VLAN_ID])
+ key.p.vlan_id = nla_get_u16(info->attrs[NL80211_ATTR_VLAN_ID]);
+
if (!rdev->ops->add_key)
return -EOPNOTSUPP;
@@ -5711,6 +5716,9 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_STA_AID])
params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
+ if (info->attrs[NL80211_ATTR_VLAN_ID])
+ params.vlan_id = nla_get_u16(info->attrs[NL80211_ATTR_VLAN_ID]);
+
if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
params.listen_interval =
nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
@@ -5856,6 +5864,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.listen_interval =
nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
+ if (info->attrs[NL80211_ATTR_VLAN_ID])
+ params.vlan_id = nla_get_u16(info->attrs[NL80211_ATTR_VLAN_ID]);
+
if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) {
params.support_p2p_ps =
nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]);
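[Editor's note] The NLA_POLICY_RANGE(NLA_U16, 1, VLAN_N_VID - 2) bound used above encodes 802.1Q: with VLAN_N_VID == 4096, usable VLAN IDs are 1..4094 (0 means priority-tagged/no VLAN, 4095 is reserved). An equivalent check, for illustration:

	static bool vlan_id_usable(u16 vid)
	{
		return vid >= 1 && vid <= VLAN_N_VID - 2;	/* 1..4094 */
	}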
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 6aee9f5e8e71..c34f7d077604 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -891,7 +891,7 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags,
/* Now attach up the new socket */
skb->sk = NULL;
kfree_skb(skb);
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
newsock->state = SS_CONNECTED;
rc = 0;
out2:
@@ -1062,7 +1062,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len);
makex25->calluserdata.cudlength = skb->len;
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
x25_insert_socket(make);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 9b599ed66d97..2c86a2fc3915 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -480,6 +480,9 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
else
XFRM_INC_STATS(net,
LINUX_MIB_XFRMINSTATEINVALID);
+
+ if (encap_type == -1)
+ dev_put(skb->dev);
goto drop;
}
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 0f5131bc3342..7ac1542feaf8 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -732,30 +732,7 @@ static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
.get_link_net = xfrmi_get_link_net,
};
-static void __net_exit xfrmi_destroy_interfaces(struct xfrmi_net *xfrmn)
-{
- struct xfrm_if *xi;
- LIST_HEAD(list);
-
- xi = rtnl_dereference(xfrmn->xfrmi[0]);
- if (!xi)
- return;
-
- unregister_netdevice_queue(xi->dev, &list);
- unregister_netdevice_many(&list);
-}
-
-static void __net_exit xfrmi_exit_net(struct net *net)
-{
- struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
-
- rtnl_lock();
- xfrmi_destroy_interfaces(xfrmn);
- rtnl_unlock();
-}
-
static struct pernet_operations xfrmi_net_ops = {
- .exit = xfrmi_exit_net,
.id = &xfrmi_net_id,
.size = sizeof(struct xfrmi_net),
};
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index c6f3c4a1bd99..f3423562d933 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -495,6 +495,8 @@ static void ___xfrm_state_destroy(struct xfrm_state *x)
x->type->destructor(x);
xfrm_put_type(x->type);
}
+ if (x->xfrag.page)
+ put_page(x->xfrag.page);
xfrm_dev_state_free(x);
security_xfrm_state_free(x);
xfrm_state_free(x);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 8a9af3ab7769..1fc42ad8ff49 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -187,6 +187,7 @@ TPROGS_CFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
TPROGS_CFLAGS += -I$(srctree)/tools/lib/
TPROGS_CFLAGS += -I$(srctree)/tools/include
TPROGS_CFLAGS += -I$(srctree)/tools/perf
+TPROGS_CFLAGS += -DHAVE_ATTR_TEST=0
ifdef SYSROOT
TPROGS_CFLAGS += --sysroot=$(SYSROOT)
@@ -203,7 +204,7 @@ TPROGLDLIBS_test_overhead += -lrt
TPROGLDLIBS_xdpsock += -pthread
# Allows pointing LLC/CLANG to an LLVM backend with bpf support, redefine on cmdline:
-# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
+# make M=samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
LLC ?= llc
CLANG ?= clang
LLVM_OBJCOPY ?= llvm-objcopy
@@ -246,7 +247,7 @@ endif
# Trick to allow make to be run from this directory
all:
- $(MAKE) -C ../../ $(CURDIR)/ BPF_SAMPLES_PATH=$(CURDIR)
+ $(MAKE) -C ../../ M=$(CURDIR) BPF_SAMPLES_PATH=$(CURDIR)
clean:
$(MAKE) -C ../../ M=$(CURDIR) clean
diff --git a/samples/bpf/README.rst b/samples/bpf/README.rst
index cc1f00a1ee06..dd34b2d26f1c 100644
--- a/samples/bpf/README.rst
+++ b/samples/bpf/README.rst
@@ -46,12 +46,10 @@ Compiling
For building the BPF samples, issue the below command from the kernel
top level directory::
- make samples/bpf/
-
-Do notice the "/" slash after the directory name.
+ make M=samples/bpf
It is also possible to call make from this directory. This will just
-hide the the invocation of make as above with the appended "/".
+hide the invocation of make as above.
Manually compiling LLVM with 'bpf' support
------------------------------------------
@@ -77,7 +75,7 @@ Quick sniplet for manually compiling LLVM and clang
It is also possible to point make to the newly compiled 'llc' or
'clang' command via redefining LLC or CLANG on the make command line::
- make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
+ make M=samples/bpf LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
Cross compiling samples
-----------------------
@@ -98,10 +96,10 @@ Pointing LLC and CLANG is not necessary if they are installed on HOST and have
the appropriate arm64 arch among their targets (usually several arches are supported).
Build samples::
- make samples/bpf/
+ make M=samples/bpf
Or build the samples with SYSROOT if some header or library (say, libelf) is
absent from the toolchain, providing the path to a file system containing the
headers and libs; this can be the RFS of the target board::
- make samples/bpf/ SYSROOT=~/some_sysroot
+ make M=samples/bpf SYSROOT=~/some_sysroot
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index 7b7c2fafbc68..be984aa29b75 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -99,7 +99,8 @@ lx-symbols command."""
attrs[n]['name'].string(): attrs[n]['address']
for n in range(int(sect_attrs['nsections']))}
args = []
- for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
+ for section_name in [".data", ".data..read_mostly", ".rodata", ".bss",
+ ".text", ".text.hot", ".text.unlikely"]:
address = section_name_to_address.get(section_name)
if address:
args.append(" -s {name} {addr}".format(
diff --git a/scripts/nsdeps b/scripts/nsdeps
index dda6fbac016e..04cea0921673 100644
--- a/scripts/nsdeps
+++ b/scripts/nsdeps
@@ -31,12 +31,12 @@ generate_deps() {
local mod_file=`echo $@ | sed -e 's/\.ko/\.mod/'`
local ns_deps_file=`echo $@ | sed -e 's/\.ko/\.ns_deps/'`
if [ ! -f "$ns_deps_file" ]; then return; fi
- local mod_source_files=`cat $mod_file | sed -n 1p \
+ local mod_source_files="`cat $mod_file | sed -n 1p \
| sed -e 's/\.o/\.c/g' \
- | sed "s|[^ ]* *|${srctree}/&|g"`
+ | sed "s|[^ ]* *|${srctree}/&|g"`"
for ns in `cat $ns_deps_file`; do
echo "Adding namespace $ns to module $mod_name (if needed)."
- generate_deps_for_ns $ns $mod_source_files
+ generate_deps_for_ns $ns "$mod_source_files"
# sort the imports
for source_file in $mod_source_files; do
sed '/MODULE_IMPORT_NS/Q' $source_file > ${source_file}.tmp
diff --git a/scripts/tools-support-relr.sh b/scripts/tools-support-relr.sh
index 97a2c844a95e..45e8aa360b45 100755
--- a/scripts/tools-support-relr.sh
+++ b/scripts/tools-support-relr.sh
@@ -4,13 +4,13 @@
tmp_file=$(mktemp)
trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
-cat << "END" | "$CC" -c -x c - -o $tmp_file.o >/dev/null 2>&1
+cat << "END" | $CC -c -x c - -o $tmp_file.o >/dev/null 2>&1
void *p = &p;
END
-"$LD" $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
+$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
# Despite printing an error message, GNU nm still exits with exit code 0 if it
# sees a relr section. So we need to check that nothing is printed to stderr.
-test -z "$("$NM" $tmp_file 2>&1 >/dev/null)"
+test -z "$($NM $tmp_file 2>&1 >/dev/null)"
-"$OBJCOPY" -O binary $tmp_file $tmp_file.bin
+$OBJCOPY -O binary $tmp_file $tmp_file.bin
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 41905afada63..f34ce564d92c 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -528,7 +528,7 @@ static int snd_compress_check_input(struct snd_compr_params *params)
{
/* first let's check the buffer parameters */
if (params->buffer.fragment_size == 0 ||
- params->buffer.fragments > INT_MAX / params->buffer.fragment_size ||
+ params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
params->buffer.fragments == 0)
return -EINVAL;
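
The bound here has to be U32_MAX rather than INT_MAX: fragments and fragment_size are unsigned 32-bit fields and their product is computed in u32, so the divide-based guard must cover the full unsigned range. A standalone sketch of the same overflow-safe check (userspace stand-in; the helper name is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Overflow-safe: true iff fragments * fragment_size fits in a uint32_t. */
    static int buffer_size_fits_u32(uint32_t fragments, uint32_t fragment_size)
    {
            if (fragment_size == 0 || fragments == 0)
                    return 0;
            /* Dividing avoids computing the possibly-overflowing product. */
            return fragments <= UINT32_MAX / fragment_size;
    }

    int main(void)
    {
            printf("%d\n", buffer_size_fits_u32(0xFFFF, 0x10000));  /* 1: 0xFFFF0000 fits */
            printf("%d\n", buffer_size_fits_u32(0x10000, 0x10000)); /* 0: 2^32 overflows */
            return 0;
    }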
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index d80041ea4e01..2236b5e0c1f2 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1782,11 +1782,14 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime;
unsigned long flags;
- if (PCM_RUNTIME_CHECK(substream))
+ if (snd_BUG_ON(!substream))
return;
- runtime = substream->runtime;
snd_pcm_stream_lock_irqsave(substream, flags);
+ if (PCM_RUNTIME_CHECK(substream))
+ goto _unlock;
+ runtime = substream->runtime;
+
if (!snd_pcm_running(substream) ||
snd_pcm_update_hw_ptr0(substream, 1) < 0)
goto _end;
@@ -1797,6 +1800,7 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
#endif
_end:
kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
+ _unlock:
snd_pcm_stream_unlock_irqrestore(substream, flags);
}
EXPORT_SYMBOL(snd_pcm_period_elapsed);
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 6b724d2ee2de..59ae21b0bb93 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -284,11 +284,11 @@ int snd_timer_open(struct snd_timer_instance **ti,
goto unlock;
}
if (!list_empty(&timer->open_list_head)) {
- timeri = list_entry(timer->open_list_head.next,
+ struct snd_timer_instance *t =
+ list_entry(timer->open_list_head.next,
struct snd_timer_instance, open_list);
- if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
+ if (t->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
err = -EBUSY;
- timeri = NULL;
goto unlock;
}
}
diff --git a/sound/firewire/bebob/bebob_focusrite.c b/sound/firewire/bebob/bebob_focusrite.c
index 32b864bee25f..06d6a37cd853 100644
--- a/sound/firewire/bebob/bebob_focusrite.c
+++ b/sound/firewire/bebob/bebob_focusrite.c
@@ -27,6 +27,8 @@
#define SAFFIRE_CLOCK_SOURCE_SPDIF 1
/* clock sources as returned from register of Saffire Pro 10 and 26 */
+#define SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK 0x000000ff
+#define SAFFIREPRO_CLOCK_SOURCE_DETECT_MASK 0x0000ff00
#define SAFFIREPRO_CLOCK_SOURCE_INTERNAL 0
#define SAFFIREPRO_CLOCK_SOURCE_SKIP 1 /* never used on hardware */
#define SAFFIREPRO_CLOCK_SOURCE_SPDIF 2
@@ -189,6 +191,7 @@ saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
map = saffirepro_clk_maps[1];
/* In case this driver cannot handle the value read from the register. */
+ value &= SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK;
if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) {
err = -EIO;
goto end;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index cf53fbd872ee..c52419376c74 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2396,6 +2396,9 @@ static const struct pci_device_id azx_ids[] = {
/* CometLake-H */
{ PCI_DEVICE(0x8086, 0x06C8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* CometLake-S */
+ { PCI_DEVICE(0x8086, 0xa3f0),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Icelake */
{ PCI_DEVICE(0x8086, 0x34c8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 6d1fb7c11f17..b7a1abb3e231 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -7604,7 +7604,7 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
/* Delay enabling the HP amp, to let the mic-detection
* state machine run.
*/
- cancel_delayed_work_sync(&spec->unsol_hp_work);
+ cancel_delayed_work(&spec->unsol_hp_work);
schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
tbl = snd_hda_jack_tbl_get(codec, cb->nid);
if (tbl)
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index b72553710ffb..78bd2e3722c7 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -46,10 +46,12 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
((codec)->core.vendor_id == 0x80862800))
#define is_cannonlake(codec) ((codec)->core.vendor_id == 0x8086280c)
#define is_icelake(codec) ((codec)->core.vendor_id == 0x8086280f)
+#define is_tigerlake(codec) ((codec)->core.vendor_id == 0x80862812)
#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
|| is_skylake(codec) || is_broxton(codec) \
|| is_kabylake(codec) || is_geminilake(codec) \
- || is_cannonlake(codec) || is_icelake(codec))
+ || is_cannonlake(codec) || is_icelake(codec) \
+ || is_tigerlake(codec))
#define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
#define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
#define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec))
@@ -2851,6 +2853,18 @@ static int patch_i915_icl_hdmi(struct hda_codec *codec)
return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
}
+static int patch_i915_tgl_hdmi(struct hda_codec *codec)
+{
+ /*
+ * pin to port mapping table where the value indicates the pin number and
+ * the index indicates the port number, 1-based.
+ */
+ static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
+
+ return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
+}
+
/* Intel Baytrail and Braswell; with eld notifier */
static int patch_i915_byt_hdmi(struct hda_codec *codec)
{
@@ -4153,6 +4167,7 @@ HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_i915_hsw_hdmi),
HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi),
HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi),
+HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),
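
The Tigerlake map added above follows the convention of the other Intel tables: the index is the 1-based port number and the value is the pin NID handed to intel_hsw_common_init(). A standalone sketch of the lookup (the helper name is hypothetical):

    #include <stdio.h>

    /* Tigerlake HDMI: index = port number - 1, value = pin NID. */
    static const int tgl_map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};

    /* Hypothetical helper: translate a 1-based port number to its pin NID. */
    static int port_to_pin(int port)
    {
            int n = sizeof(tgl_map) / sizeof(tgl_map[0]);

            if (port < 1 || port > n)
                    return -1;
            return tgl_map[port - 1];
    }

    int main(void)
    {
            printf("port 1 -> pin 0x%x\n", port_to_pin(1)); /* 0x4 */
            printf("port 9 -> pin 0x%x\n", port_to_pin(9)); /* 0xf */
            return 0;
    }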
diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c
index 91242b6f8ea7..4570f662fb48 100644
--- a/sound/soc/codecs/hdac_hda.c
+++ b/sound/soc/codecs/hdac_hda.c
@@ -410,8 +410,8 @@ static void hdac_hda_codec_remove(struct snd_soc_component *component)
return;
}
- snd_hdac_ext_bus_link_put(hdev->bus, hlink);
pm_runtime_disable(&hdev->dev);
+ snd_hdac_ext_bus_link_put(hdev->bus, hlink);
}
static const struct snd_soc_dapm_route hdac_hda_dapm_routes[] = {
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index b5fd8f08726e..f8b5b960e597 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -274,7 +274,7 @@ struct hdmi_codec_priv {
uint8_t eld[MAX_ELD_BYTES];
struct snd_pcm_chmap *chmap_info;
unsigned int chmap_idx;
- struct mutex lock;
+ unsigned long busy;
struct snd_soc_jack *jack;
unsigned int jack_status;
};
@@ -390,8 +390,8 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
int ret = 0;
- ret = mutex_trylock(&hcp->lock);
- if (!ret) {
+ ret = test_and_set_bit(0, &hcp->busy);
+ if (ret) {
dev_err(dai->dev, "Only one simultaneous stream supported!\n");
return -EINVAL;
}
@@ -419,7 +419,7 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
err:
/* Release the exclusive lock on error */
- mutex_unlock(&hcp->lock);
+ clear_bit(0, &hcp->busy);
return ret;
}
@@ -431,7 +431,7 @@ static void hdmi_codec_shutdown(struct snd_pcm_substream *substream,
hcp->chmap_idx = HDMI_CODEC_CHMAP_IDX_UNKNOWN;
hcp->hcd.ops->audio_shutdown(dai->dev->parent, hcp->hcd.data);
- mutex_unlock(&hcp->lock);
+ clear_bit(0, &hcp->busy);
}
static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
@@ -811,8 +811,6 @@ static int hdmi_codec_probe(struct platform_device *pdev)
return -ENOMEM;
hcp->hcd = *hcd;
- mutex_init(&hcp->lock);
-
daidrv = devm_kcalloc(dev, dai_count, sizeof(*daidrv), GFP_KERNEL);
if (!daidrv)
return -ENOMEM;
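
Swapping the mutex for test_and_set_bit() keeps the one-stream-at-a-time guarantee but as a non-blocking atomic flag, which is also safe from contexts where a mutex may not be taken. A rough userspace analog of the pattern using C11 atomics (this mirrors, not reproduces, the kernel bitops):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag busy = ATOMIC_FLAG_INIT;

    /* Returns 0 on success, -1 if another stream already holds the codec. */
    static int stream_startup(void)
    {
            if (atomic_flag_test_and_set(&busy))
                    return -1;      /* already busy: one stream only */
            return 0;
    }

    static void stream_shutdown(void)
    {
            atomic_flag_clear(&busy);  /* mirrors clear_bit(0, &hcp->busy) */
    }

    int main(void)
    {
            printf("first open:  %d\n", stream_startup());  /* 0 */
            printf("second open: %d\n", stream_startup());  /* -1 */
            stream_shutdown();
            printf("after close: %d\n", stream_startup());  /* 0 */
            return 0;
    }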
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index eb709d528259..cae1def8902d 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -960,11 +960,11 @@ static int max98373_i2c_probe(struct i2c_client *i2c,
/* Power on device */
if (gpio_is_valid(max98373->reset_gpio)) {
- ret = gpio_request(max98373->reset_gpio, "MAX98373_RESET");
+ ret = devm_gpio_request(&i2c->dev, max98373->reset_gpio,
+ "MAX98373_RESET");
if (ret) {
dev_err(&i2c->dev, "%s: Failed to request gpio %d\n",
__func__, max98373->reset_gpio);
- gpio_free(max98373->reset_gpio);
return -EINVAL;
}
gpio_direction_output(max98373->reset_gpio, 0);
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index 667e9f73aba3..e3d311fb510e 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -306,7 +306,7 @@ struct pm8916_wcd_analog_priv {
};
static const char *const adc2_mux_text[] = { "ZERO", "INP2", "INP3" };
-static const char *const rdac2_mux_text[] = { "ZERO", "RX2", "RX1" };
+static const char *const rdac2_mux_text[] = { "RX1", "RX2" };
static const char *const hph_text[] = { "ZERO", "Switch", };
static const struct soc_enum hph_enum = SOC_ENUM_SINGLE_VIRT(
@@ -321,7 +321,7 @@ static const struct soc_enum adc2_enum = SOC_ENUM_SINGLE_VIRT(
/* RDAC2 MUX */
static const struct soc_enum rdac2_mux_enum = SOC_ENUM_SINGLE(
- CDC_D_CDC_CONN_HPHR_DAC_CTL, 0, 3, rdac2_mux_text);
+ CDC_D_CDC_CONN_HPHR_DAC_CTL, 0, 2, rdac2_mux_text);
static const struct snd_kcontrol_new spkr_switch[] = {
SOC_DAPM_SINGLE("Switch", CDC_A_SPKR_DAC_CTL, 7, 1, 0)
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 61226fefe1c4..2a4ffe945177 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -555,10 +555,6 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- err = clk_prepare_enable(priv->clk);
- if (err < 0)
- return err;
-
priv->extclk = devm_clk_get(&pdev->dev, "extclk");
if (IS_ERR(priv->extclk)) {
if (PTR_ERR(priv->extclk) == -EPROBE_DEFER)
@@ -574,6 +570,10 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
}
}
+ err = clk_prepare_enable(priv->clk);
+ if (err < 0)
+ return err;
+
/* Some sensible defaults - this reflects the powerup values */
priv->ctl_play = KIRKWOOD_PLAYCTL_SIZE_24;
priv->ctl_rec = KIRKWOOD_RECCTL_SIZE_24;
@@ -587,7 +587,7 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
priv->ctl_rec |= KIRKWOOD_RECCTL_BURST_128;
}
- err = devm_snd_soc_register_component(&pdev->dev, &kirkwood_soc_component,
+ err = snd_soc_register_component(&pdev->dev, &kirkwood_soc_component,
soc_dai, 2);
if (err) {
dev_err(&pdev->dev, "snd_soc_register_component failed\n");
@@ -610,6 +610,7 @@ static int kirkwood_i2s_dev_remove(struct platform_device *pdev)
{
struct kirkwood_dma_data *priv = dev_get_drvdata(&pdev->dev);
+ snd_soc_unregister_component(&pdev->dev);
if (!IS_ERR(priv->extclk))
clk_disable_unprepare(priv->extclk);
clk_disable_unprepare(priv->clk);
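
Two ordering fixes in one hunk: clk_prepare_enable() now runs only after the extclk lookup, so an -EPROBE_DEFER there no longer leaves the main clock enabled, and the component is registered without devm so remove can unregister it before the clocks it uses are disabled. A stub sketch of the mirrored probe/remove ordering (all function names are hypothetical stand-ins):

    #include <stdio.h>

    static int get_extclk(void)            { return 0; /* or a deferral */ }
    static void clk_on(void)               { puts("clk enabled"); }
    static void clk_off(void)              { puts("clk disabled"); }
    static int register_component(void)    { puts("registered"); return 0; }
    static void unregister_component(void) { puts("unregistered"); }

    static int probe(void)
    {
            if (get_extclk())       /* acquire deferrable resources first... */
                    return -1;      /* ...so no clock is left on when deferring */
            clk_on();
            return register_component();
    }

    static void driver_remove(void)
    {
            unregister_component(); /* reverse of probe: component goes first */
            clk_off();              /* only then stop the clock it was using */
    }

    int main(void)
    {
            if (!probe())
                    driver_remove();
            return 0;
    }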
diff --git a/sound/soc/rockchip/rockchip_max98090.c b/sound/soc/rockchip/rockchip_max98090.c
index 0097df1fae66..e80b09143b63 100644
--- a/sound/soc/rockchip/rockchip_max98090.c
+++ b/sound/soc/rockchip/rockchip_max98090.c
@@ -66,10 +66,13 @@ static int rk_jack_event(struct notifier_block *nb, unsigned long event,
struct snd_soc_jack *jack = (struct snd_soc_jack *)data;
struct snd_soc_dapm_context *dapm = &jack->card->dapm;
- if (event & SND_JACK_MICROPHONE)
+ if (event & SND_JACK_MICROPHONE) {
snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
- else
+ snd_soc_dapm_force_enable_pin(dapm, "SHDN");
+ } else {
snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+ snd_soc_dapm_disable_pin(dapm, "SHDN");
+ }
snd_soc_dapm_sync(dapm);
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 0324a5c39619..28f65eba2bb4 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -508,10 +508,10 @@ static struct rsnd_mod_ops rsnd_dmapp_ops = {
#define RDMA_SSI_I_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)
-#define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400))
+#define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
#define RDMA_SSIU_O_N(addr, i, j) RDMA_SSIU_I_N(addr, i, j)
-#define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400))
+#define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
#define RDMA_SSIU_O_P(addr, i, j) RDMA_SSIU_I_P(addr, i, j)
#define RDMA_SRC_I_N(addr, i) (addr ##_reg - 0x00500000 + (0x400 * i))
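
The new trailing term is zero unless both (i) / 9 and (j) / 4 evaluate to 1, i.e. it only moves SSI9's BUSIF4..7 registers down by 0x4000; every other (i, j) combination is unchanged. The relative arithmetic checked in isolation (offsets only; the real register base from addr##_reg is omitted):

    #include <stdio.h>

    /* Offset portion of RDMA_SSIU_I_N, relative to its base address. */
    static long ssiu_off(long i, long j)
    {
            return (0x1000 * i) + ((j / 4) * 0xA000) + ((j % 4) * 0x400)
                   - (0x4000 * (i / 9) * (j / 4));
    }

    int main(void)
    {
            printf("SSI8 BUSIF4: 0x%lx\n", ssiu_off(8, 4)); /* 0x12000, no correction */
            printf("SSI9 BUSIF4: 0x%lx\n", ssiu_off(9, 4)); /* 0xf000, 0x4000 lower */
            printf("SSI9 BUSIF0: 0x%lx\n", ssiu_off(9, 0)); /* 0x9000, untouched */
            return 0;
    }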
diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
index 54cd431faab7..5529e8eeca46 100644
--- a/sound/soc/sof/debug.c
+++ b/sound/soc/sof/debug.c
@@ -152,8 +152,10 @@ static ssize_t sof_dfsentry_write(struct file *file, const char __user *buffer,
*/
dentry = file->f_path.dentry;
if (strcmp(dentry->d_name.name, "ipc_flood_count") &&
- strcmp(dentry->d_name.name, "ipc_flood_duration_ms"))
- return -EINVAL;
+ strcmp(dentry->d_name.name, "ipc_flood_duration_ms")) {
+ ret = -EINVAL;
+ goto out;
+ }
if (!strcmp(dentry->d_name.name, "ipc_flood_duration_ms"))
flood_duration_test = true;
diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
index 2c7447188402..0c11fceb28a7 100644
--- a/sound/soc/sof/intel/hda-stream.c
+++ b/sound/soc/sof/intel/hda-stream.c
@@ -190,7 +190,7 @@ hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction)
* Workaround to address a known issue with host DMA that results
* in xruns during pause/release in capture scenarios.
*/
- if (!IS_ENABLED(SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
+ if (!IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
if (stream && direction == SNDRV_PCM_STREAM_CAPTURE)
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
HDA_VS_INTEL_EM2,
@@ -228,7 +228,7 @@ int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag)
spin_unlock_irq(&bus->reg_lock);
/* Enable DMI L1 entry if there are no capture streams open */
- if (!IS_ENABLED(SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
+ if (!IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
if (!active_capture_stream)
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
HDA_VS_INTEL_EM2,
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
index b2f359d2f7e5..086eeeab8679 100644
--- a/sound/soc/sof/ipc.c
+++ b/sound/soc/sof/ipc.c
@@ -572,8 +572,10 @@ static int sof_set_get_large_ctrl_data(struct snd_sof_dev *sdev,
else
err = sof_get_ctrl_copy_params(cdata->type, partdata, cdata,
sparams);
- if (err < 0)
+ if (err < 0) {
+ kfree(partdata);
return err;
+ }
msg_bytes = sparams->msg_bytes;
pl_size = sparams->pl_size;
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index 0aabb3190ddc..4452594c2e17 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -543,15 +543,16 @@ static int sof_control_load_bytes(struct snd_soc_component *scomp,
struct soc_bytes_ext *sbe = (struct soc_bytes_ext *)kc->private_value;
int max_size = sbe->max;
- if (le32_to_cpu(control->priv.size) > max_size) {
+ /* init the get/put bytes data */
+ scontrol->size = sizeof(struct sof_ipc_ctrl_data) +
+ le32_to_cpu(control->priv.size);
+
+ if (scontrol->size > max_size) {
dev_err(sdev->dev, "err: bytes data size %d exceeds max %d.\n",
- control->priv.size, max_size);
+ scontrol->size, max_size);
return -EINVAL;
}
- /* init the get/put bytes data */
- scontrol->size = sizeof(struct sof_ipc_ctrl_data) +
- le32_to_cpu(control->priv.size);
scontrol->control_data = kzalloc(max_size, GFP_KERNEL);
cdata = scontrol->control_data;
if (!scontrol->control_data)
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index a4060813bc74..48e629ac2d88 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -1218,6 +1218,16 @@ static int stm32_sai_pcm_process_spdif(struct snd_pcm_substream *substream,
return 0;
}
+/* No support of mmap in S/PDIF mode */
+static const struct snd_pcm_hardware stm32_sai_pcm_hw_spdif = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED,
+ .buffer_bytes_max = 8 * PAGE_SIZE,
+ .period_bytes_min = 1024,
+ .period_bytes_max = PAGE_SIZE,
+ .periods_min = 2,
+ .periods_max = 8,
+};
+
static const struct snd_pcm_hardware stm32_sai_pcm_hw = {
.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP,
.buffer_bytes_max = 8 * PAGE_SIZE,
@@ -1270,7 +1280,7 @@ static const struct snd_dmaengine_pcm_config stm32_sai_pcm_config = {
};
static const struct snd_dmaengine_pcm_config stm32_sai_pcm_config_spdif = {
- .pcm_hardware = &stm32_sai_pcm_hw,
+ .pcm_hardware = &stm32_sai_pcm_hw_spdif,
.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
.process = stm32_sai_pcm_process_spdif,
};
diff --git a/sound/soc/ti/sdma-pcm.c b/sound/soc/ti/sdma-pcm.c
index a236350beb10..2b0bc234e1b6 100644
--- a/sound/soc/ti/sdma-pcm.c
+++ b/sound/soc/ti/sdma-pcm.c
@@ -62,7 +62,7 @@ int sdma_pcm_platform_register(struct device *dev,
config->chan_names[0] = txdmachan;
config->chan_names[1] = rxdmachan;
- return devm_snd_dmaengine_pcm_register(dev, config, 0);
+ return devm_snd_dmaengine_pcm_register(dev, config, flags);
}
EXPORT_SYMBOL_GPL(sdma_pcm_platform_register);
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index a2ab8e8d3a93..4a9a2f6ef5a4 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -388,6 +388,9 @@ static void snd_complete_urb(struct urb *urb)
}
prepare_outbound_urb(ep, ctx);
+ /* can be stopped during prepare callback */
+ if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
+ goto exit_clear;
} else {
retire_inbound_urb(ep, ctx);
/* can be stopped during retire callback */
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 3fd1d1749edf..45eee5cc312e 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1229,7 +1229,8 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
if (cval->min + cval->res < cval->max) {
int last_valid_res = cval->res;
int saved, test, check;
- get_cur_mix_raw(cval, minchn, &saved);
+ if (get_cur_mix_raw(cval, minchn, &saved) < 0)
+ goto no_res_check;
for (;;) {
test = saved;
if (test < cval->max)
@@ -1249,6 +1250,7 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
snd_usb_set_cur_mix_value(cval, minchn, 0, saved);
}
+no_res_check:
cval->initialized = 1;
}
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 0bbe1201a6ac..349e1e52996d 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -248,8 +248,8 @@ static int create_yamaha_midi_quirk(struct snd_usb_audio *chip,
NULL, USB_MS_MIDI_OUT_JACK);
if (!injd && !outjd)
return -ENODEV;
- if (!(injd && snd_usb_validate_midi_desc(injd)) ||
- !(outjd && snd_usb_validate_midi_desc(outjd)))
+ if ((injd && !snd_usb_validate_midi_desc(injd)) ||
+ (outjd && !snd_usb_validate_midi_desc(outjd)))
return -ENODEV;
if (injd && (injd->bLength < 5 ||
(injd->bJackType != USB_MS_EMBEDDED &&
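
The quirks.c change fixes inverted validation logic: the old condition returned -ENODEV whenever one of the two jack descriptors was merely absent, even though the preceding !injd && !outjd test already guarantees at least one is present. Only a descriptor that exists but fails validation should be fatal. A minimal demonstration of the difference (the stub pointer type and valid() are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in: a non-NULL pointer to a nonzero value is a valid descriptor. */
    static bool valid(const int *d) { return d && *d; }

    static bool old_check(const int *in, const int *out)
    {
            /* Also rejects when a descriptor is simply absent. */
            return !(in && valid(in)) || !(out && valid(out));
    }

    static bool new_check(const int *in, const int *out)
    {
            /* Rejects only a descriptor that is present but invalid. */
            return (in && !valid(in)) || (out && !valid(out));
    }

    int main(void)
    {
            int good = 1;
            /* Only an input jack present: old logic wrongly bails out. */
            printf("old: %d (1 = -ENODEV)\n", old_check(&good, NULL));  /* 1 */
            printf("new: %d (0 = accepted)\n", new_check(&good, NULL)); /* 0 */
            return 0;
    }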
diff --git a/sound/usb/validate.c b/sound/usb/validate.c
index a5e584b60dcd..389e8657434a 100644
--- a/sound/usb/validate.c
+++ b/sound/usb/validate.c
@@ -81,9 +81,9 @@ static bool validate_processing_unit(const void *p,
switch (v->protocol) {
case UAC_VERSION_1:
default:
- /* bNrChannels, wChannelConfig, iChannelNames, bControlSize */
- len += 1 + 2 + 1 + 1;
- if (d->bLength < len) /* bControlSize */
+ /* bNrChannels, wChannelConfig, iChannelNames */
+ len += 1 + 2 + 1;
+ if (d->bLength < len + 1) /* bControlSize */
return false;
m = hdr[len];
len += 1 + m + 1; /* bControlSize, bmControls, iProcessing */
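
The corrected check matches the byte actually read: after counting bNrChannels, wChannelConfig and iChannelNames, hdr[len] is the bControlSize byte itself, so the descriptor has to be at least len + 1 bytes long before it is dereferenced. The same walk as a standalone sketch (start offset and buffer contents are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Count bNrChannels(1) + wChannelConfig(2) + iChannelNames(1), then read
     * bControlSize and skip bmControls + iProcessing, bounds-checked. */
    static int walk(const uint8_t *hdr, uint8_t bLength, int len)
    {
            uint8_t m;

            len += 1 + 2 + 1;
            if (bLength < len + 1)  /* hdr[len] is bControlSize: need len + 1 */
                    return -1;
            m = hdr[len];
            len += 1 + m + 1;       /* bControlSize, bmControls, iProcessing */
            if (bLength < len)
                    return -1;
            return len;
    }

    int main(void)
    {
            uint8_t hdr[14] = { [11] = 1 };  /* bControlSize = 1 at offset 11 */

            printf("consumed: %d\n", walk(hdr, sizeof(hdr), 7));  /* 14 */
            return 0;
    }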
diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
index 6ecdd1067826..1178d302757e 100644
--- a/tools/gpio/Makefile
+++ b/tools/gpio/Makefile
@@ -3,7 +3,11 @@ include ../scripts/Makefile.include
bindir ?= /usr/bin
-ifeq ($(srctree),)
+# This will work when gpio is built in the tools env, where srctree
+# isn't set, and when invoked from the selftests build, where srctree
+# is set to ".". building_out_of_srctree is undefined for in-srctree
+# builds.
+ifndef building_out_of_srctree
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index 63e4349a772a..15e458e150bd 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -15,7 +15,9 @@ void test_attr__init(void);
void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
int fd, int group_fd, unsigned long flags);
-#define HAVE_ATTR_TEST
+#ifndef HAVE_ATTR_TEST
+#define HAVE_ATTR_TEST 1
+#endif
static inline int
sys_perf_event_open(struct perf_event_attr *attr,
@@ -27,7 +29,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
fd = syscall(__NR_perf_event_open, attr, pid, cpu,
group_fd, flags);
-#ifdef HAVE_ATTR_TEST
+#if HAVE_ATTR_TEST
if (unlikely(test_attr__enabled))
test_attr__open(attr, pid, cpu, fd, group_fd, flags);
#endif
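
The guard turns HAVE_ATTR_TEST from a bare flag into a value: a build that reuses this header outside perf can predefine HAVE_ATTR_TEST=0 (e.g. with -DHAVE_ATTR_TEST=0) to compile the test hook out, while in-tree builds keep the default of 1. The pattern in isolation:

    #include <stdio.h>

    /* Builders may predefine this on the command line; default to on. */
    #ifndef HAVE_ATTR_TEST
    #define HAVE_ATTR_TEST 1
    #endif

    int main(void)
    {
    #if HAVE_ATTR_TEST
            puts("attr test hook compiled in");
    #else
            puts("attr test hook compiled out");
    #endif
            return 0;
    }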
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 679a1d75090c..7b6eaf5e0bda 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1625,7 +1625,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
return 0;
}
-static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
+static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
struct hists *hists = a->hists;
struct perf_hpp_fmt *fmt;
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 15961854ba67..741f040648b5 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -539,10 +539,11 @@ static int perl_stop_script(void)
static int perl_generate_script(struct tep_handle *pevent, const char *outfile)
{
+ int i, not_first, count, nr_events;
+ struct tep_event **all_events;
struct tep_event *event = NULL;
struct tep_format_field *f;
char fname[PATH_MAX];
- int not_first, count;
FILE *ofp;
sprintf(fname, "%s.pl", outfile);
@@ -603,8 +604,11 @@ sub print_backtrace\n\
}\n\n\
");
+ nr_events = tep_get_events_count(pevent);
+ all_events = tep_list_events(pevent, TEP_EVENT_SORT_ID);
- while ((event = trace_find_next_event(pevent, event))) {
+ for (i = 0; all_events && i < nr_events; i++) {
+ event = all_events[i];
fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name);
fprintf(ofp, "\tmy (");
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 5d341efc3237..93c03b39cd9c 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1687,10 +1687,11 @@ static int python_stop_script(void)
static int python_generate_script(struct tep_handle *pevent, const char *outfile)
{
+ int i, not_first, count, nr_events;
+ struct tep_event **all_events;
struct tep_event *event = NULL;
struct tep_format_field *f;
char fname[PATH_MAX];
- int not_first, count;
FILE *ofp;
sprintf(fname, "%s.py", outfile);
@@ -1735,7 +1736,11 @@ static int python_generate_script(struct tep_handle *pevent, const char *outfile
fprintf(ofp, "def trace_end():\n");
fprintf(ofp, "\tprint(\"in trace_end\")\n\n");
- while ((event = trace_find_next_event(pevent, event))) {
+ nr_events = tep_get_events_count(pevent);
+ all_events = tep_list_events(pevent, TEP_EVENT_SORT_ID);
+
+ for (i = 0; all_events && i < nr_events; i++) {
+ event = all_events[i];
fprintf(ofp, "def %s__%s(", event->system, event->name);
fprintf(ofp, "event_name, ");
fprintf(ofp, "context, ");
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 5d6bfc70b210..9634f0ae57be 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -173,37 +173,6 @@ int parse_event_file(struct tep_handle *pevent,
return tep_parse_event(pevent, buf, size, sys);
}
-struct tep_event *trace_find_next_event(struct tep_handle *pevent,
- struct tep_event *event)
-{
- static int idx;
- int events_count;
- struct tep_event *all_events;
-
- all_events = tep_get_first_event(pevent);
- events_count = tep_get_events_count(pevent);
- if (!pevent || !all_events || events_count < 1)
- return NULL;
-
- if (!event) {
- idx = 0;
- return all_events;
- }
-
- if (idx < events_count && event == (all_events + idx)) {
- idx++;
- if (idx == events_count)
- return NULL;
- return (all_events + idx);
- }
-
- for (idx = 1; idx < events_count; idx++) {
- if (event == (all_events + (idx - 1)))
- return (all_events + idx);
- }
- return NULL;
-}
-
struct flag {
const char *name;
unsigned long long value;
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 2e158387b3d7..72fdf2a3577c 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -47,8 +47,6 @@ void parse_saved_cmdline(struct tep_handle *pevent, char *file, unsigned int siz
ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe);
-struct tep_event *trace_find_next_event(struct tep_handle *pevent,
- struct tep_event *event);
unsigned long long read_size(struct tep_event *event, void *ptr, int size);
unsigned long long eval_flag(const char *flag);
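
With trace_find_next_event() gone (its static index and pointer arithmetic assumed a single flat, contiguous event array), both script generators iterate over the event list that libtraceevent already maintains. The replacement pattern condensed into one helper (a sketch built on the tep_list_events() and tep_get_events_count() calls used above; it needs libtraceevent to build):

    #include <event-parse.h>  /* libtraceevent */

    /* Visit every event, sorted by id, as the generators now do. */
    static void for_each_event(struct tep_handle *pevent)
    {
            struct tep_event **all_events;
            int i, nr_events;

            nr_events = tep_get_events_count(pevent);
            all_events = tep_list_events(pevent, TEP_EVENT_SORT_ID);

            for (i = 0; all_events && i < nr_events; i++) {
                    struct tep_event *event = all_events[i];
                    /* ... emit code for event->system / event->name ... */
                    (void)event;
            }
    }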
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index fc8a4319c1b2..8294ae3ffb3c 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -314,7 +314,10 @@ class DebugfsDir:
continue
p = os.path.join(path, f)
- if os.path.isfile(p) and os.access(p, os.R_OK):
+ if not os.stat(p).st_mode & stat.S_IRUSR:
+ continue
+
+ if os.path.isfile(p):
_, out = cmd('cat %s/%s' % (path, f))
dfs[f] = out.strip()
elif os.path.isdir(p):
@@ -332,13 +335,22 @@ class NetdevSimDev:
"""
Class for netdevsim bus device and its attributes.
"""
+ @staticmethod
+ def ctrl_write(path, val):
+ fullpath = os.path.join("/sys/bus/netdevsim/", path)
+ try:
+ with open(fullpath, "w") as f:
+ f.write(val)
+ except OSError as e:
+ log("WRITE %s: %r" % (fullpath, val), -e.errno)
+ raise e
+ log("WRITE %s: %r" % (fullpath, val), 0)
def __init__(self, port_count=1):
addr = 0
while True:
try:
- with open("/sys/bus/netdevsim/new_device", "w") as f:
- f.write("%u %u" % (addr, port_count))
+ self.ctrl_write("new_device", "%u %u" % (addr, port_count))
except OSError as e:
if e.errno == errno.ENOSPC:
addr += 1
@@ -400,14 +412,13 @@ class NetdevSimDev:
return progs
def remove(self):
- with open("/sys/bus/netdevsim/del_device", "w") as f:
- f.write("%u" % self.addr)
+ self.ctrl_write("del_device", "%u" % (self.addr, ))
devs.remove(self)
def remove_nsim(self, nsim):
self.nsims.remove(nsim)
- with open("/sys/bus/netdevsim/devices/netdevsim%u/del_port" % self.addr ,"w") as f:
- f.write("%u" % nsim.port_index)
+ self.ctrl_write("devices/netdevsim%u/del_port" % (self.addr, ),
+ "%u" % (nsim.port_index, ))
class NetdevSim:
"""
diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c
index 7aff907003d3..40bd93a6e7ae 100644
--- a/tools/testing/selftests/bpf/test_sysctl.c
+++ b/tools/testing/selftests/bpf/test_sysctl.c
@@ -184,9 +184,14 @@ static struct sysctl_test tests[] = {
.descr = "ctx:file_pos sysctl:read read ok narrow",
.insns = {
/* If (file_pos == X) */
+#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
offsetof(struct bpf_sysctl, file_pos)),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
+ offsetof(struct bpf_sysctl, file_pos) + 3),
+#endif
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 4, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
@@ -199,6 +204,7 @@ static struct sysctl_test tests[] = {
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "kernel/ostype",
.open_flags = O_RDONLY,
+ .seek = 4,
.result = SUCCESS,
},
{
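
The BPF_B load reads a single byte of the 32-bit file_pos field, and which byte holds the value depends on host endianness: offset 0 on little-endian, offset 3 on big-endian, which is why the big-endian variant adds + 3. Seeking to 4 first makes that byte nonzero, so a broken narrow load cannot pass by accident. The same addressing in plain C:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint32_t file_pos = 4;  /* the test seeks to offset 4 first */
            uint8_t raw[sizeof(file_pos)];

            memcpy(raw, &file_pos, sizeof(raw));
    #if __BYTE_ORDER == __LITTLE_ENDIAN
            printf("value byte at offset 0: %u\n", raw[0]);  /* 4 */
    #else
            printf("value byte at offset 3: %u\n", raw[3]);  /* 4 */
    #endif
            return 0;
    }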
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh
index 126caf28b529..58cdbfb608e9 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh
@@ -92,46 +92,6 @@ cleanup()
vrf_cleanup
}
-l2_drops_test()
-{
- local trap_name=$1; shift
- local group_name=$1; shift
-
- # This is the common part of all the tests. It checks that stats are
- # initially idle, then non-idle after changing the trap action and
- # finally idle again. It also makes sure the packets are dropped and
- # never forwarded.
- devlink_trap_stats_idle_test $trap_name
- check_err $? "Trap stats not idle with initial drop action"
- devlink_trap_group_stats_idle_test $group_name
- check_err $? "Trap group stats not idle with initial drop action"
-
- devlink_trap_action_set $trap_name "trap"
-
- devlink_trap_stats_idle_test $trap_name
- check_fail $? "Trap stats idle after setting action to trap"
- devlink_trap_group_stats_idle_test $group_name
- check_fail $? "Trap group stats idle after setting action to trap"
-
- devlink_trap_action_set $trap_name "drop"
-
- devlink_trap_stats_idle_test $trap_name
- check_err $? "Trap stats not idle after setting action to drop"
- devlink_trap_group_stats_idle_test $group_name
- check_err $? "Trap group stats not idle after setting action to drop"
-
- tc_check_packets "dev $swp2 egress" 101 0
- check_err $? "Packets were not dropped"
-}
-
-l2_drops_cleanup()
-{
- local mz_pid=$1; shift
-
- kill $mz_pid && wait $mz_pid &> /dev/null
- tc filter del dev $swp2 egress protocol ip pref 1 handle 101 flower
-}
-
source_mac_is_multicast_test()
{
local trap_name="source_mac_is_multicast"
@@ -147,11 +107,11 @@ source_mac_is_multicast_test()
RET=0
- l2_drops_test $trap_name $group_name
+ devlink_trap_drop_test $trap_name $group_name $swp2
log_test "Source MAC is multicast"
- l2_drops_cleanup $mz_pid
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip
}
__vlan_tag_mismatch_test()
@@ -172,7 +132,7 @@ __vlan_tag_mismatch_test()
$MZ $h1 "$opt" -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- l2_drops_test $trap_name $group_name
+ devlink_trap_drop_test $trap_name $group_name $swp2
# Add PVID and make sure packets are no longer dropped.
bridge vlan add vid 1 dev $swp1 pvid untagged master
@@ -188,7 +148,7 @@ __vlan_tag_mismatch_test()
devlink_trap_action_set $trap_name "drop"
- l2_drops_cleanup $mz_pid
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip
}
vlan_tag_mismatch_untagged_test()
@@ -233,7 +193,7 @@ ingress_vlan_filter_test()
$MZ $h1 -Q $vid -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- l2_drops_test $trap_name $group_name
+ devlink_trap_drop_test $trap_name $group_name $swp2
# Add the VLAN on the bridge port and make sure packets are no longer
# dropped.
@@ -252,7 +212,7 @@ ingress_vlan_filter_test()
log_test "Ingress VLAN filter"
- l2_drops_cleanup $mz_pid
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip
bridge vlan del vid $vid dev $swp1 master
bridge vlan del vid $vid dev $swp2 master
@@ -277,7 +237,7 @@ __ingress_stp_filter_test()
$MZ $h1 -Q $vid -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- l2_drops_test $trap_name $group_name
+ devlink_trap_drop_test $trap_name $group_name $swp2
# Change STP state to forwarding and make sure packets are no longer
# dropped.
@@ -294,7 +254,7 @@ __ingress_stp_filter_test()
devlink_trap_action_set $trap_name "drop"
- l2_drops_cleanup $mz_pid
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip
bridge vlan del vid $vid dev $swp1 master
bridge vlan del vid $vid dev $swp2 master
@@ -348,7 +308,7 @@ port_list_is_empty_uc_test()
$MZ $h1 -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- l2_drops_test $trap_name $group_name
+ devlink_trap_drop_test $trap_name $group_name $swp2
# Allow packets to be flooded to one port.
ip link set dev $swp2 type bridge_slave flood on
@@ -366,7 +326,7 @@ port_list_is_empty_uc_test()
log_test "Port list is empty - unicast"
- l2_drops_cleanup $mz_pid
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip
ip link set dev $swp1 type bridge_slave flood on
}
@@ -394,7 +354,7 @@ port_list_is_empty_mc_test()
$MZ $h1 -c 0 -p 100 -a own -b $dmac -t ip -B $dip -d 1msec -q &
mz_pid=$!
- l2_drops_test $trap_name $group_name
+ devlink_trap_drop_test $trap_name $group_name $swp2
# Allow packets to be flooded to one port.
ip link set dev $swp2 type bridge_slave mcast_flood on
@@ -412,7 +372,7 @@ port_list_is_empty_mc_test()
log_test "Port list is empty - multicast"
- l2_drops_cleanup $mz_pid
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip
ip link set dev $swp1 type bridge_slave mcast_flood on
}
@@ -441,7 +401,7 @@ port_loopback_filter_uc_test()
$MZ $h1 -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- l2_drops_test $trap_name $group_name
+ devlink_trap_drop_test $trap_name $group_name $swp2
# Allow packets to be flooded.
ip link set dev $swp2 type bridge_slave flood on
@@ -459,7 +419,7 @@ port_loopback_filter_uc_test()
log_test "Port loopback filter - unicast"
- l2_drops_cleanup $mz_pid
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip
}
port_loopback_filter_test()
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
new file mode 100755
index 000000000000..b4efb023ae51
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
@@ -0,0 +1,563 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test devlink-trap L3 drops functionality over mlxsw. Each registered L3 drop
+# packet trap is tested to make sure it is triggered under the right
+# conditions.
+
+# +---------------------------------+
+# | H1 (vrf) |
+# | + $h1 |
+# | | 192.0.2.1/24 |
+# | | 2001:db8:1::1/64 |
+# | | |
+# | | default via 192.0.2.2 |
+# | | default via 2001:db8:1::2 |
+# +----|----------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | SW | |
+# | + $rp1 |
+# | 192.0.2.2/24 |
+# | 2001:db8:1::2/64 |
+# | |
+# | 2001:db8:2::2/64 |
+# | 198.51.100.2/24 |
+# | + $rp2 |
+# | | |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|----------------------------+
+# | | default via 198.51.100.2 |
+# | | default via 2001:db8:2::2 |
+# | | |
+# | | 2001:db8:2::1/64 |
+# | | 198.51.100.1/24 |
+# | + $h2 |
+# | H2 (vrf) |
+# +---------------------------------+
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ non_ip_test
+ uc_dip_over_mc_dmac_test
+ dip_is_loopback_test
+ sip_is_mc_test
+ sip_is_loopback_test
+ ip_header_corrupted_test
+ ipv4_sip_is_limited_bc_test
+ ipv6_mc_dip_reserved_scope_test
+ ipv6_mc_dip_interface_local_scope_test
+ blackhole_route_test
+"
+
+NUM_NETIFS=4
+source $lib_dir/lib.sh
+source $lib_dir/tc_common.sh
+source $lib_dir/devlink_lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+
+ ip -4 route add default vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add default vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip -6 route del default vrf v$h1 nexthop via 2001:db8:1::2
+ ip -4 route del default vrf v$h1 nexthop via 192.0.2.2
+
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 $h2_ipv4/24 $h2_ipv6/64
+
+ ip -4 route add default vrf v$h2 nexthop via 198.51.100.2
+ ip -6 route add default vrf v$h2 nexthop via 2001:db8:2::2
+}
+
+h2_destroy()
+{
+ ip -6 route del default vrf v$h2 nexthop via 2001:db8:2::2
+ ip -4 route del default vrf v$h2 nexthop via 198.51.100.2
+
+ simple_if_fini $h2 $h2_ipv4/24 $h2_ipv6/64
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link set dev $rp2 up
+
+ tc qdisc add dev $rp2 clsact
+
+ __addr_add_del $rp1 add 192.0.2.2/24 2001:db8:1::2/64
+ __addr_add_del $rp2 add 198.51.100.2/24 2001:db8:2::2/64
+}
+
+router_destroy()
+{
+ __addr_add_del $rp2 del 198.51.100.2/24 2001:db8:2::2/64
+ __addr_add_del $rp1 del 192.0.2.2/24 2001:db8:1::2/64
+
+ tc qdisc del dev $rp2 clsact
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ h1mac=$(mac_get $h1)
+ rp1mac=$(mac_get $rp1)
+
+ h1_ipv4=192.0.2.1
+ h2_ipv4=198.51.100.1
+ h1_ipv6=2001:db8:1::1
+ h2_ipv6=2001:db8:2::1
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+
+ router_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ router_destroy
+
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+ping_check()
+{
+ trap_name=$1; shift
+
+ devlink_trap_action_set $trap_name "trap"
+ ping_do $h1 $h2_ipv4
+ check_err $? "Packets that should not be trapped were trapped"
+ devlink_trap_action_set $trap_name "drop"
+}
+
+non_ip_test()
+{
+ local trap_name="non_ip"
+ local group_name="l3_drops"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol ip pref 1 handle 101 \
+ flower dst_ip $h2_ipv4 action drop
+
+ # Generate non-IP packets to the router
+ $MZ $h1 -c 0 -p 100 -d 1msec -B $h2_ipv4 -q "$rp1mac $h1mac \
+ 00:00 de:ad:be:ef" &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "Non IP"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ip"
+}
+
+__uc_dip_over_mc_dmac_test()
+{
+ local desc=$1; shift
+ local proto=$1; shift
+ local dip=$1; shift
+ local flags=${1:-""}; shift
+ local trap_name="uc_dip_over_mc_dmac"
+ local group_name="l3_drops"
+ local dmac=01:02:03:04:05:06
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol $proto pref 1 handle 101 \
+ flower ip_proto udp src_port 54321 dst_port 12345 action drop
+
+ # Generate IP packets with a unicast IP and a multicast destination MAC
+ $MZ $h1 $flags -t udp "sp=54321,dp=12345" -c 0 -p 100 -b $dmac \
+ -B $dip -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "Unicast destination IP over multicast destination MAC: $desc"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+}
+
+uc_dip_over_mc_dmac_test()
+{
+ __uc_dip_over_mc_dmac_test "IPv4" "ip" $h2_ipv4
+ __uc_dip_over_mc_dmac_test "IPv6" "ipv6" $h2_ipv6 "-6"
+}
+
+__sip_is_loopback_test()
+{
+ local desc=$1; shift
+ local proto=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local flags=${1:-""}; shift
+ local trap_name="sip_is_loopback_address"
+ local group_name="l3_drops"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol $proto pref 1 handle 101 \
+ flower src_ip $sip action drop
+
+ # Generate packets with loopback source IP
+ $MZ $h1 $flags -t udp "sp=54321,dp=12345" -c 0 -p 100 -A $sip \
+ -b $rp1mac -B $dip -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "Source IP is loopback address: $desc"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+}
+
+sip_is_loopback_test()
+{
+ __sip_is_loopback_test "IPv4" "ip" "127.0.0.0/8" $h2_ipv4
+ __sip_is_loopback_test "IPv6" "ipv6" "::1" $h2_ipv6 "-6"
+}
+
+__dip_is_loopback_test()
+{
+ local desc=$1; shift
+ local proto=$1; shift
+ local dip=$1; shift
+ local flags=${1:-""}; shift
+ local trap_name="dip_is_loopback_address"
+ local group_name="l3_drops"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol $proto pref 1 handle 101 \
+ flower dst_ip $dip action drop
+
+ # Generate packets with loopback destination IP
+ $MZ $h1 $flags -t udp "sp=54321,dp=12345" -c 0 -p 100 -b $rp1mac \
+ -B $dip -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "Destination IP is loopback address: $desc"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+}
+
+dip_is_loopback_test()
+{
+ __dip_is_loopback_test "IPv4" "ip" "127.0.0.0/8"
+ __dip_is_loopback_test "IPv6" "ipv6" "::1" "-6"
+}
+
+__sip_is_mc_test()
+{
+ local desc=$1; shift
+ local proto=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local flags=${1:-""}; shift
+ local trap_name="sip_is_mc"
+ local group_name="l3_drops"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol $proto pref 1 handle 101 \
+ flower src_ip $sip action drop
+
+ # Generate packets with multicast source IP
+ $MZ $h1 $flags -t udp "sp=54321,dp=12345" -c 0 -p 100 -A $sip \
+ -b $rp1mac -B $dip -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "Source IP is multicast: $desc"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+}
+
+sip_is_mc_test()
+{
+ __sip_is_mc_test "IPv4" "ip" "239.1.1.1" $h2_ipv4
+ __sip_is_mc_test "IPv6" "ipv6" "FF02::2" $h2_ipv6 "-6"
+}
+
+ipv4_sip_is_limited_bc_test()
+{
+ local trap_name="ipv4_sip_is_limited_bc"
+ local group_name="l3_drops"
+ local sip=255.255.255.255
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol ip pref 1 handle 101 \
+ flower src_ip $sip action drop
+
+ # Generate packets with limited broadcast source IP
+ $MZ $h1 -t udp "sp=54321,dp=12345" -c 0 -p 100 -A $sip -b $rp1mac \
+ -B $h2_ipv4 -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "IPv4 source IP is limited broadcast"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ip"
+}
+
+ipv4_payload_get()
+{
+ local ipver=$1; shift
+ local ihl=$1; shift
+ local checksum=$1; shift
+
+ p=$(:
+ )"08:00:"$( : ETH type
+ )"$ipver"$( : IP version
+ )"$ihl:"$( : IHL
+ )"00:"$( : IP TOS
+ )"00:F4:"$( : IP total length
+ )"00:00:"$( : IP identification
+ )"20:00:"$( : IP flags + frag off
+ )"30:"$( : IP TTL
+ )"01:"$( : IP proto
+ )"$checksum:"$( : IP header csum
+ )"$h1_ipv4:"$( : IP saddr
+ )"$h2_ipv4:"$( : IP daddr
+ )
+ echo $p
+}
+
+__ipv4_header_corrupted_test()
+{
+ local desc=$1; shift
+ local ipver=$1; shift
+ local ihl=$1; shift
+ local checksum=$1; shift
+ local trap_name="ip_header_corrupted"
+ local group_name="l3_drops"
+ local payload
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol ip pref 1 handle 101 \
+ flower dst_ip $h2_ipv4 action drop
+
+ payload=$(ipv4_payload_get $ipver $ihl $checksum)
+
+ # Generate packets with corrupted IP header
+ $MZ $h1 -c 0 -d 1msec -a $h1mac -b $rp1mac -q p=$payload &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "IP header corrupted: $desc: IPv4"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ip"
+}
+
+ipv6_payload_get()
+{
+ local ipver=$1; shift
+
+ p=$(:
+ )"86:DD:"$( : ETH type
+ )"$ipver"$( : IP version
+ )"0:0:"$( : Traffic class
+ )"0:00:00:"$( : Flow label
+ )"00:00:"$( : Payload length
+ )"01:"$( : Next header
+ )"04:"$( : Hop limit
+ )"$h1_ipv6:"$( : IP saddr
+ )"$h2_ipv6:"$( : IP daddr
+ )
+ echo $p
+}
+
+__ipv6_header_corrupted_test()
+{
+ local desc=$1; shift
+ local ipver=$1; shift
+ local trap_name="ip_header_corrupted"
+ local group_name="l3_drops"
+ local payload
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol ip pref 1 handle 101 \
+ flower dst_ip $h2_ipv4 action drop
+
+ payload=$(ipv6_payload_get $ipver)
+
+ # Generate packets with corrupted IP header
+ $MZ $h1 -c 0 -d 1msec -a $h1mac -b $rp1mac -q p=$payload &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "IP header corrupted: $desc: IPv6"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ip"
+}
+
+ip_header_corrupted_test()
+{
+ # Each test uses one wrong value. The three values below are correct.
+ local ipv="4"
+ local ihl="5"
+ local checksum="00:F4"
+
+ __ipv4_header_corrupted_test "wrong IP version" 5 $ihl $checksum
+ __ipv4_header_corrupted_test "wrong IHL" $ipv 4 $checksum
+ __ipv4_header_corrupted_test "wrong checksum" $ipv $ihl "00:00"
+ __ipv6_header_corrupted_test "wrong IP version" 5
+}
+
+ipv6_mc_dip_reserved_scope_test()
+{
+ local trap_name="ipv6_mc_dip_reserved_scope"
+ local group_name="l3_drops"
+ local dip=FF00::
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol ipv6 pref 1 handle 101 \
+ flower dst_ip $dip action drop
+
+ # Generate packets with reserved scope destination IP
+ $MZ $h1 -6 -t udp "sp=54321,dp=12345" -c 0 -p 100 -b \
+ "33:33:00:00:00:00" -B $dip -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "IPv6 multicast destination IP reserved scope"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ipv6"
+}
+
+ipv6_mc_dip_interface_local_scope_test()
+{
+ local trap_name="ipv6_mc_dip_interface_local_scope"
+ local group_name="l3_drops"
+ local dip=FF01::
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol ipv6 pref 1 handle 101 \
+ flower dst_ip $dip action drop
+
+ # Generate packets with interface local scope destination IP
+ $MZ $h1 -6 -t udp "sp=54321,dp=12345" -c 0 -p 100 -b \
+ "33:33:00:00:00:00" -B $dip -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "IPv6 multicast destination IP interface-local scope"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ipv6"
+}
+
+__blackhole_route_test()
+{
+ local flags=$1; shift
+ local subnet=$1; shift
+ local proto=$1; shift
+ local dip=$1; shift
+ local ip_proto=${1:-"icmp"}; shift
+ local trap_name="blackhole_route"
+ local group_name="l3_drops"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ ip -$flags route add blackhole $subnet
+ tc filter add dev $rp2 egress protocol $proto pref 1 handle 101 \
+ flower skip_hw dst_ip $dip ip_proto $ip_proto action drop
+
+ # Generate packets to the blackhole route
+ $MZ $h1 -$flags -t udp "sp=54321,dp=12345" -c 0 -p 100 -b $rp1mac \
+ -B $dip -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+ log_test "Blackhole route: IPv$flags"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+ ip -$flags route del blackhole $subnet
+}
+
+blackhole_route_test()
+{
+ __blackhole_route_test "4" "198.51.100.0/30" "ip" $h2_ipv4
+ __blackhole_route_test "6" "2001:db8:2::/120" "ipv6" $h2_ipv6 "icmpv6"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
new file mode 100755
index 000000000000..2bc6df42d597
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
@@ -0,0 +1,557 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test devlink-trap L3 exceptions functionality over mlxsw.
+# Check all exception traps to make sure they are triggered under the right
+# conditions.
+
+# +---------------------------------+
+# | H1 (vrf) |
+# | + $h1 |
+# | | 192.0.2.1/24 |
+# | | 2001:db8:1::1/64 |
+# | | |
+# | | default via 192.0.2.2 |
+# | | default via 2001:db8:1::2 |
+# +----|----------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | SW | |
+# | + $rp1 |
+# | 192.0.2.2/24 |
+# | 2001:db8:1::2/64 |
+# | |
+# | 2001:db8:2::2/64 |
+# | 198.51.100.2/24 |
+# | + $rp2 |
+# | | |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|----------------------------+
+# | | default via 198.51.100.2 |
+# | | default via 2001:db8:2::2 |
+# | | |
+# | | 2001:db8:2::1/64 |
+# | | 198.51.100.1/24 |
+# | + $h2 |
+# | H2 (vrf) |
+# +---------------------------------+
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ mtu_value_is_too_small_test
+ ttl_value_is_too_small_test
+ mc_reverse_path_forwarding_test
+ reject_route_test
+ unresolved_neigh_test
+ ipv4_lpm_miss_test
+ ipv6_lpm_miss_test
+"
+
+NUM_NETIFS=4
+source $lib_dir/lib.sh
+source $lib_dir/tc_common.sh
+source $lib_dir/devlink_lib.sh
+
+require_command $MCD
+require_command $MC_CLI
+table_name=selftests
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+
+ ip -4 route add default vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add default vrf v$h1 nexthop via 2001:db8:1::2
+
+ tc qdisc add dev $h1 clsact
+}
+
+h1_destroy()
+{
+ tc qdisc del dev $h1 clsact
+
+ ip -6 route del default vrf v$h1 nexthop via 2001:db8:1::2
+ ip -4 route del default vrf v$h1 nexthop via 192.0.2.2
+
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 198.51.100.1/24 2001:db8:2::1/64
+
+ ip -4 route add default vrf v$h2 nexthop via 198.51.100.2
+ ip -6 route add default vrf v$h2 nexthop via 2001:db8:2::2
+}
+
+h2_destroy()
+{
+ ip -6 route del default vrf v$h2 nexthop via 2001:db8:2::2
+ ip -4 route del default vrf v$h2 nexthop via 198.51.100.2
+
+ simple_if_fini $h2 198.51.100.1/24 2001:db8:2::1/64
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link set dev $rp2 up
+
+ tc qdisc add dev $rp2 clsact
+
+ __addr_add_del $rp1 add 192.0.2.2/24 2001:db8:1::2/64
+ __addr_add_del $rp2 add 198.51.100.2/24 2001:db8:2::2/64
+}
+
+router_destroy()
+{
+ __addr_add_del $rp2 del 198.51.100.2/24 2001:db8:2::2/64
+ __addr_add_del $rp1 del 192.0.2.2/24 2001:db8:1::2/64
+
+ tc qdisc del dev $rp2 clsact
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp1mac=$(mac_get $rp1)
+
+ start_mcd
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+
+ router_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ router_destroy
+
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+
+ kill_mcd
+}
+
+ping_check()
+{
+ ping_do $h1 198.51.100.1
+ check_err $? "Packets that should not be trapped were trapped"
+}
+
+trap_action_check()
+{
+ local trap_name=$1; shift
+ local expected_action=$1; shift
+
+ action=$(devlink_trap_action_get $trap_name)
+ if [ "$action" != $expected_action ]; then
+ check_err 1 "Trap $trap_name has wrong action: $action"
+ fi
+}
+
+mtu_value_is_too_small_test()
+{
+ local trap_name="mtu_value_is_too_small"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ # type - Destination Unreachable
+ # code - Fragmentation Needed and Don't Fragment was Set
+ tc filter add dev $h1 ingress protocol ip pref 1 handle 101 \
+ flower skip_hw ip_proto icmp type 3 code 4 action pass
+
+ mtu_set $rp2 1300
+
+ # Generate IP packets bigger than the router's MTU with the
+ # don't-fragment flag set.
+ $MZ $h1 -t udp "sp=54321,dp=12345,df" -p 1400 -c 0 -d 1msec -b $rp1mac \
+ -B 198.51.100.1 -q &
+ mz_pid=$!
+
+ devlink_trap_exception_test $trap_name $group_name
+
+ tc_check_packets_hitting "dev $h1 ingress" 101
+ check_err $? "Packets were not received to h1"
+
+ log_test "MTU value is too small"
+
+ mtu_restore $rp2
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ tc filter del dev $h1 ingress protocol ip pref 1 handle 101 flower
+}
+
+__ttl_value_is_too_small_test()
+{
+ local ttl_val=$1; shift
+ local trap_name="ttl_value_is_too_small"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ # type - Time Exceeded
+ # code - Time to Live exceeded in Transit
+ tc filter add dev $h1 ingress protocol ip pref 1 handle 101 \
+ flower skip_hw ip_proto icmp type 11 code 0 action pass
+
+ # Generate IP packets with small TTL
+ $MZ $h1 -t udp "ttl=$ttl_val,sp=54321,dp=12345" -c 0 -d 1msec \
+ -b $rp1mac -B 198.51.100.1 -q &
+ mz_pid=$!
+
+ devlink_trap_exception_test $trap_name $group_name
+
+ tc_check_packets_hitting "dev $h1 ingress" 101
+ check_err $? "Packets were not received to h1"
+
+ log_test "TTL value is too small: TTL=$ttl_val"
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ tc filter del dev $h1 ingress protocol ip pref 1 handle 101 flower
+}
+
+ttl_value_is_too_small_test()
+{
+ __ttl_value_is_too_small_test 0
+ __ttl_value_is_too_small_test 1
+}
+
+start_mcd()
+{
+ SMCROUTEDIR="$(mktemp -d)"
+ for ((i = 1; i <= $NUM_NETIFS; ++i)); do
+ echo "phyint ${NETIFS[p$i]} enable" >> \
+ $SMCROUTEDIR/$table_name.conf
+ done
+
+ $MCD -N -I $table_name -f $SMCROUTEDIR/$table_name.conf \
+ -P $SMCROUTEDIR/$table_name.pid
+}
+
+kill_mcd()
+{
+ pkill $MCD
+ rm -rf $SMCROUTEDIR
+}
+
+__mc_reverse_path_forwarding_test()
+{
+ local desc=$1; shift
+ local src_ip=$1; shift
+ local dst_ip=$1; shift
+ local dst_mac=$1; shift
+ local proto=$1; shift
+ local flags=${1:-""}; shift
+ local trap_name="mc_reverse_path_forwarding"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ tc filter add dev $rp2 egress protocol $proto pref 1 handle 101 \
+ flower dst_ip $dst_ip ip_proto udp action drop
+
+ $MC_CLI -I $table_name add $rp1 $src_ip $dst_ip $rp2
+
+ # Generate packets to a multicast address.
+ $MZ $h2 $flags -t udp "sp=54321,dp=12345" -c 0 -p 128 \
+ -a 00:11:22:33:44:55 -b $dst_mac \
+ -A $src_ip -B $dst_ip -q &
+
+ mz_pid=$!
+
+ devlink_trap_exception_test $trap_name $group_name
+
+ tc_check_packets "dev $rp2 egress" 101 0
+ check_err $? "Packets were not dropped"
+
+ log_test "Multicast reverse path forwarding: $desc"
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ tc filter del dev $rp2 egress protocol $proto pref 1 handle 101 flower
+}
+
+mc_reverse_path_forwarding_test()
+{
+ __mc_reverse_path_forwarding_test "IPv4" "192.0.2.1" "225.1.2.3" \
+ "01:00:5e:01:02:03" "ip"
+ __mc_reverse_path_forwarding_test "IPv6" "2001:db8:1::1" "ff0e::3" \
+ "33:33:00:00:00:03" "ipv6" "-6"
+}
+
+__reject_route_test()
+{
+ local desc=$1; shift
+ local dst_ip=$1; shift
+ local proto=$1; shift
+ local ip_proto=$1; shift
+ local type=$1; shift
+ local code=$1; shift
+ local unreachable=$1; shift
+ local flags=${1:-""}; shift
+ local trap_name="reject_route"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ tc filter add dev $h1 ingress protocol $proto pref 1 handle 101 flower \
+ skip_hw ip_proto $ip_proto type $type code $code action pass
+
+ ip route add unreachable $unreachable
+
+ # Generate packets to h2. The destination IP is unreachable.
+ $MZ $flags $h1 -t udp "sp=54321,dp=12345" -c 0 -d 1msec -b $rp1mac \
+ -B $dst_ip -q &
+ mz_pid=$!
+
+ devlink_trap_exception_test $trap_name $group_name
+
+ tc_check_packets_hitting "dev $h1 ingress" 101
+ check_err $? "ICMP packet was not received to h1"
+
+ log_test "Reject route: $desc"
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ ip route del unreachable $unreachable
+ tc filter del dev $h1 ingress protocol $proto pref 1 handle 101 flower
+}
+
+reject_route_test()
+{
+ # type - Destination Unreachable
+ # code - Host Unreachable
+ __reject_route_test "IPv4" 198.51.100.1 "ip" "icmp" 3 1 \
+ "198.51.100.0/26"
+ # type - Destination Unreachable
+ # code - No Route
+ __reject_route_test "IPv6" 2001:db8:2::1 "ipv6" "icmpv6" 1 0 \
+ "2001:db8:2::0/66" "-6"
+}
+
+__host_miss_test()
+{
+ local desc=$1; shift
+ local dip=$1; shift
+ local trap_name="unresolved_neigh"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ ip neigh flush dev $rp2
+
+ t0_packets=$(devlink_trap_rx_packets_get $trap_name)
+
+ # Generate packets to h2 (will incur an unresolved neighbour).
+ # The ping should pass and the devlink counters should increase.
+ ping_do $h1 $dip
+ check_err $? "ping failed: $desc"
+
+ t1_packets=$(devlink_trap_rx_packets_get $trap_name)
+
+ if [[ $t0_packets -eq $t1_packets ]]; then
+ check_err 1 "Trap counter did not increase"
+ fi
+
+ log_test "Unresolved neigh: host miss: $desc"
+}
+
+__invalid_nexthop_test()
+{
+ local desc=$1; shift
+ local dip=$1; shift
+ local extra_add=$1; shift
+ local subnet=$1; shift
+ local via_add=$1; shift
+ local trap_name="unresolved_neigh"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ ip address add $extra_add/$subnet dev $h2
+
+ # Check that a correct route does not trigger unresolved_neigh
+ ip $flags route add $dip via $extra_add dev $rp2
+
+ # Generate packets in order to discover all neighbours.
+ # Without this, the unresolved_neigh counters would increase
+ # during neighbour discovery and the check below would fail
+ # for the wrong reason.
+ ping_do $h1 $dip
+
+ t0_packets=$(devlink_trap_rx_packets_get $trap_name)
+ ping_do $h1 $dip
+ t1_packets=$(devlink_trap_rx_packets_get $trap_name)
+
+ if [[ $t0_packets -ne $t1_packets ]]; then
+ check_err 1 "Trap counter increased when it should not"
+ fi
+
+ ip $flags route del $dip via $extra_add dev $rp2
+
+ # Check that a route to a nonexistent nexthop triggers
+ # unresolved_neigh
+ ip $flags route add $dip via $via_add dev $h2
+
+ t0_packets=$(devlink_trap_rx_packets_get $trap_name)
+ ping_do $h1 $dip
+ t1_packets=$(devlink_trap_rx_packets_get $trap_name)
+
+ if [[ $t0_packets -eq $t1_packets ]]; then
+ check_err 1 "Trap counter did not increase"
+ fi
+
+ ip $flags route del $dip via $via_add dev $h2
+ ip address del $extra_add/$subnet dev $h2
+ log_test "Unresolved neigh: nexthop does not exist: $desc"
+}
+
+unresolved_neigh_test()
+{
+ __host_miss_test "IPv4" 198.51.100.1
+ __host_miss_test "IPv6" 2001:db8:2::1
+ __invalid_nexthop_test "IPv4" 198.51.100.1 198.51.100.3 24 198.51.100.4
+ __invalid_nexthop_test "IPv6" 2001:db8:2::1 2001:db8:2::3 64 \
+ 2001:db8:2::4
+}
+
+vrf_without_routes_create()
+{
+ # Creating a VRF cycles the links down and then up again. By
+ # default, IPv6 addresses are not retained when a link goes down,
+ # so keep them via sysctl configuration.
+ sysctl_set net.ipv6.conf.$rp1.keep_addr_on_down 1
+ sysctl_set net.ipv6.conf.$rp2.keep_addr_on_down 1
+
+ ip link add dev vrf1 type vrf table 101
+ ip link set dev $rp1 master vrf1
+ ip link set dev $rp2 master vrf1
+ ip link set dev vrf1 up
+
+ # Wait for rp1 and rp2 to be up
+ setup_wait
+}
+
+vrf_without_routes_destroy()
+{
+ ip link set dev $rp1 nomaster
+ ip link set dev $rp2 nomaster
+ ip link del dev vrf1
+
+ sysctl_restore net.ipv6.conf.$rp2.keep_addr_on_down
+ sysctl_restore net.ipv6.conf.$rp1.keep_addr_on_down
+
+ # Wait for interfaces to be up
+ setup_wait
+}
+
+ipv4_lpm_miss_test()
+{
+ local trap_name="ipv4_lpm_miss"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ # Create a VRF without a default route
+ vrf_without_routes_create
+
+ # Generate packets through a VRF without a matching route.
+ $MZ $h1 -t udp "sp=54321,dp=12345" -c 0 -d 1msec -b $rp1mac \
+ -B 203.0.113.1 -q &
+ mz_pid=$!
+
+ devlink_trap_exception_test $trap_name $group_name
+
+ log_test "LPM miss: IPv4"
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ vrf_without_routes_destroy
+}
+
+ipv6_lpm_miss_test()
+{
+ local trap_name="ipv6_lpm_miss"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ # Create a VRF without a default route
+ vrf_without_routes_create
+
+ # Generate packets through a VRF without a matching route.
+ $MZ -6 $h1 -t udp "sp=54321,dp=12345" -c 0 -d 1msec -b $rp1mac \
+ -B 2001:db8::1 -q &
+ mz_pid=$!
+
+ devlink_trap_exception_test $trap_name $group_name
+
+ log_test "LPM miss: IPv6"
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ vrf_without_routes_destroy
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
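
The host-miss and invalid-nexthop checks above all rely on one idiom: read the trap's rx counter, generate traffic, read it again, and compare the two samples. A minimal standalone sketch of that pattern, assuming the devlink JSON layout that devlink_trap_rx_packets_get() parses (the jq path and trap name are illustrative):

    # Hedged sketch; $DEVLINK_DEV comes from the forwarding lib.
    trap_rx_get()
    {
        devlink -js trap show $DEVLINK_DEV trap $1 \
            | jq '.[][][]["stats"]["rx"]["packets"]'
    }

    t0=$(trap_rx_get unresolved_neigh)
    ping -c 5 -w 5 198.51.100.1 &> /dev/null
    t1=$(trap_rx_get unresolved_neigh)
    [[ $t1 -gt $t0 ]] && echo "trap counter increased"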
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
index 2b5f4f7cc905..7b2acba82a49 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
@@ -8,6 +8,11 @@ source $lib_dir/lib.sh
source $lib_dir/tc_common.sh
source $lib_dir/devlink_lib.sh
+if [ "$DEVLINK_VIDDID" != "15b3:cf6c" ]; then
+ echo "SKIP: test is tailored for Mellanox Spectrum-2"
+ exit 1
+fi
+
current_test=""
cleanup()
@@ -16,11 +21,13 @@ cleanup()
if [ ! -z $current_test ]; then
${current_test}_cleanup
fi
+ # Need to reload in order to avoid router abort.
+ devlink_reload
}
trap cleanup EXIT
-ALL_TESTS="tc_flower mirror_gre"
+ALL_TESTS="router tc_flower mirror_gre"
for current_test in ${TESTS:-$ALL_TESTS}; do
source ${current_test}_scale.sh
@@ -34,6 +41,7 @@ for current_test in ${TESTS:-$ALL_TESTS}; do
setup_wait $num_netifs
${current_test}_test "$target" "$should_fail"
${current_test}_cleanup
+ devlink_reload
if [[ "$should_fail" -eq 0 ]]; then
log_test "'$current_test' $target"
else
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/router_scale.sh
new file mode 100644
index 000000000000..1897e163e3ab
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/router_scale.sh
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../router_scale.sh
+
+router_get_target()
+{
+ local should_fail=$1
+ local target
+
+ target=$(devlink_resource_size_get kvd)
+
+ if [[ $should_fail -eq 0 ]]; then
+ target=$((target * 85 / 100))
+ else
+ target=$((target + 1))
+ fi
+
+ echo $target
+}
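
router_get_target() sizes the run from the KVD resource: 85% of it is expected to fit, while one entry past the full size is expected to fail. A hedged sketch of resolving that number with plain devlink, assuming devlink_resource_size_get() boils down to a JSON query (the jq path is illustrative):

    devlink -j resource show $DEVLINK_DEV \
        | jq '.. | objects | select(.name? == "kvd") | .size'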
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
index ae6146ec5afd..4632f51af7ab 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
@@ -112,14 +112,16 @@ sanitization_single_dev_mcast_group_test()
RET=0
ip link add dev br0 type bridge mcast_snooping 0
+ ip link add name dummy1 up type dummy
ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
ttl 20 tos inherit local 198.51.100.1 dstport 4789 \
- dev $swp2 group 239.0.0.1
+ dev dummy1 group 239.0.0.1
sanitization_single_dev_test_fail
ip link del dev vxlan0
+ ip link del dev dummy1
ip link del dev br0
log_test "vxlan device with a multicast group"
@@ -181,13 +183,15 @@ sanitization_single_dev_local_interface_test()
RET=0
ip link add dev br0 type bridge mcast_snooping 0
+ ip link add name dummy1 up type dummy
ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
- ttl 20 tos inherit local 198.51.100.1 dstport 4789 dev $swp2
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789 dev dummy1
sanitization_single_dev_test_fail
ip link del dev vxlan0
+ ip link del dev dummy1
ip link del dev br0
log_test "vxlan device with local interface"
diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
index ee89cd2f5bee..025a84c2ab5a 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
@@ -279,6 +279,12 @@ resource_test()
devlink -N testns1 dev reload $DL_HANDLE netns testns2
check_fail $? "Unexpected successful reload from netns \"testns1\" into netns \"testns2\""
+ devlink -N testns2 resource set $DL_HANDLE path IPv4/fib size ' -1'
+ check_err $? "Failed to reset IPv4/fib resource size"
+
+ devlink -N testns2 dev reload $DL_HANDLE netns 1
+ check_err $? "Failed to reload devlink back"
+
ip netns del testns2
ip netns del testns1
@@ -425,6 +431,15 @@ dummy_reporter_test()
check_reporter_info dummy healthy 3 3 10 true
+ echo 8192 > $DEBUGFS_DIR/health/binary_len
+ check_fail $? "Managed to set dummy reporter binary len to 8192"
+
+ local dump=$(devlink health dump show $DL_HANDLE reporter dummy -j)
+ check_err $? "Failed show dump of dummy reporter"
+
+ devlink health dump clear $DL_HANDLE reporter dummy
+ check_err $? "Failed clear dump of dummy reporter"
+
log_test "dummy reporter test"
}
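
The added assertions exercise the health dump path end to end; the same two commands can be replayed by hand against the netdevsim reporter, exactly as the script issues them:

    devlink health dump show $DL_HANDLE reporter dummy -j
    devlink health dump clear $DL_HANDLE reporter dummy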
diff --git a/tools/testing/selftests/kvm/lib/assert.c b/tools/testing/selftests/kvm/lib/assert.c
index 4911fc77d0f6..d1cf9f6e0e6b 100644
--- a/tools/testing/selftests/kvm/lib/assert.c
+++ b/tools/testing/selftests/kvm/lib/assert.c
@@ -55,7 +55,7 @@ static void test_dump_stack(void)
#pragma GCC diagnostic pop
}
-static pid_t gettid(void)
+static pid_t _gettid(void)
{
return syscall(SYS_gettid);
}
@@ -72,7 +72,7 @@ test_assert(bool exp, const char *exp_str,
fprintf(stderr, "==== Test Assertion Failure ====\n"
" %s:%u: %s\n"
" pid=%d tid=%d - %s\n",
- file, line, exp_str, getpid(), gettid(),
+ file, line, exp_str, getpid(), _gettid(),
strerror(errno));
test_dump_stack();
if (fmt) {
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 0bd6b23c97ef..a8e04d665b69 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -10,7 +10,7 @@ TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh
TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh
TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh
-TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh
+TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh
TEST_PROGS_EXTENDED := in_netns.sh
TEST_GEN_FILES = socket nettest
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
diff --git a/tools/testing/selftests/net/altnames.sh b/tools/testing/selftests/net/altnames.sh
new file mode 100755
index 000000000000..4254ddc3f70b
--- /dev/null
+++ b/tools/testing/selftests/net/altnames.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+lib_dir=$(dirname $0)/forwarding
+
+ALL_TESTS="altnames_test"
+NUM_NETIFS=0
+source $lib_dir/lib.sh
+
+DUMMY_DEV=dummytest
+SHORT_NAME=shortname
+LONG_NAME=someveryveryveryveryveryverylongname
+
+altnames_test()
+{
+ RET=0
+ local output
+ local name
+
+ ip link property add $DUMMY_DEV altname $SHORT_NAME
+ check_err $? "Failed to add short alternative name"
+
+ output=$(ip -j -p link show $SHORT_NAME)
+ check_err $? "Failed to do link show with short alternative name"
+
+ name=$(echo $output | jq -e -r ".[0].altnames[0]")
+ check_err $? "Failed to get short alternative name from link show JSON"
+
+ [ "$name" == "$SHORT_NAME" ]
+ check_err $? "Got unexpected short alternative name from link show JSON"
+
+ ip -j -p link show $DUMMY_DEV &>/dev/null
+ check_err $? "Failed to do link show with original name"
+
+ ip link property add $DUMMY_DEV altname $LONG_NAME
+ check_err $? "Failed to add long alternative name"
+
+ output=$(ip -j -p link show $LONG_NAME)
+ check_err $? "Failed to do link show with long alternative name"
+
+ name=$(echo $output | jq -e -r ".[0].altnames[1]")
+ check_err $? "Failed to get long alternative name from link show JSON"
+
+ [ "$name" == "$LONG_NAME" ]
+ check_err $? "Got unexpected long alternative name from link show JSON"
+
+ ip link property del $DUMMY_DEV altname $SHORT_NAME
+ check_err $? "Failed to add short alternative name"
+
+ ip -j -p link show $SHORT_NAME &>/dev/null
+ check_fail $? "Unexpected success while trying to do link show with deleted short alternative name"
+
+ # The long name is left in place on purpose, to be removed together
+ # with the device.
+
+ log_test "altnames test"
+}
+
+setup_prepare()
+{
+ ip link add name $DUMMY_DEV type dummy
+}
+
+cleanup()
+{
+ pre_cleanup
+ ip link del name $DUMMY_DEV
+}
+
+trap cleanup EXIT
+
+setup_prepare
+
+tests_run
+
+exit $EXIT_STATUS
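
The test boils down to four iproute2 commands; the following mirrors its flow on a scratch dummy device, with jq used the same way to pull the altname back out of the JSON dump:

    ip link add name dummytest type dummy
    ip link property add dummytest altname shortname
    ip -j link show shortname | jq -r '.[0].altnames[0]'   # -> shortname
    ip link property del dummytest altname shortname
    ip link del name dummytest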
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 76c1897e6352..6dd403103800 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -9,7 +9,7 @@ ret=0
ksft_skip=4
# all tests in this script. Can be overridden with -t option
-TESTS="unregister down carrier nexthop suppress ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw rp_filter"
+TESTS="unregister down carrier nexthop suppress ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw rp_filter ipv4_del_addr"
VERBOSE=0
PAUSE_ON_FAIL=no
@@ -1500,6 +1500,55 @@ ipv4_route_metrics_test()
route_cleanup
}
+ipv4_del_addr_test()
+{
+ echo
+ echo "IPv4 delete address route tests"
+
+ setup
+
+ set -e
+ $IP li add dummy1 type dummy
+ $IP li set dummy1 up
+ $IP li add dummy2 type dummy
+ $IP li set dummy2 up
+ $IP li add red type vrf table 1111
+ $IP li set red up
+ $IP ro add vrf red unreachable default
+ $IP li set dummy2 vrf red
+
+ $IP addr add dev dummy1 172.16.104.1/24
+ $IP addr add dev dummy1 172.16.104.11/24
+ $IP addr add dev dummy2 172.16.104.1/24
+ $IP addr add dev dummy2 172.16.104.11/24
+ $IP route add 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11
+ $IP route add vrf red 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11
+ set +e
+
+ # removing address from device in vrf should only remove route from vrf table
+ $IP addr del dev dummy2 172.16.104.11/24
+ $IP ro ls vrf red | grep -q 172.16.105.0/24
+ log_test $? 1 "Route removed from VRF when source address deleted"
+
+ $IP ro ls | grep -q 172.16.105.0/24
+ log_test $? 0 "Route in default VRF not removed"
+
+ $IP addr add dev dummy2 172.16.104.11/24
+ $IP route add vrf red 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11
+
+ $IP addr del dev dummy1 172.16.104.11/24
+ $IP ro ls | grep -q 172.16.105.0/24
+ log_test $? 1 "Route removed in default VRF when source address deleted"
+
+ $IP ro ls vrf red | grep -q 172.16.105.0/24
+ log_test $? 0 "Route in VRF is not removed by address delete"
+
+ $IP li del dummy1
+ $IP li del dummy2
+ cleanup
+}
+
ipv4_route_v6_gw_test()
{
local rc
@@ -1633,6 +1682,7 @@ do
ipv4_route_test|ipv4_rt) ipv4_route_test;;
ipv6_addr_metric) ipv6_addr_metric_test;;
ipv4_addr_metric) ipv4_addr_metric_test;;
+ ipv4_del_addr) ipv4_del_addr_test;;
ipv6_route_metrics) ipv6_route_metrics_test;;
ipv4_route_metrics) ipv4_route_metrics_test;;
ipv4_route_v6_gw) ipv4_route_v6_gw_test;;
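
The new case pins down one rule: deleting a route's preferred source address flushes the route only from the table owning the device that carried the address. Distilled to plain commands with the test's addresses:

    ip route add vrf red 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11
    ip addr del dev dummy2 172.16.104.11/24        # dummy2 is enslaved to red
    ip route show vrf red | grep 172.16.105.0/24   # gone from the VRF table
    ip route show | grep 172.16.105.0/24           # main-table route survives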
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
index 13d03a6d85ba..40b076983239 100644
--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -355,3 +355,58 @@ devlink_trap_group_stats_idle_test()
return 1
fi
}
+
+devlink_trap_exception_test()
+{
+ local trap_name=$1; shift
+ local group_name=$1; shift
+
+ devlink_trap_stats_idle_test $trap_name
+ check_fail $? "Trap stats idle when packets should have been trapped"
+
+ devlink_trap_group_stats_idle_test $group_name
+ check_fail $? "Trap group idle when packets should have been trapped"
+}
+
+devlink_trap_drop_test()
+{
+ local trap_name=$1; shift
+ local group_name=$1; shift
+ local dev=$1; shift
+
+ # This is the common part of all the tests. It checks that stats are
+ # initially idle, then non-idle after changing the trap action and
+ # finally idle again. It also makes sure the packets are dropped and
+ # never forwarded.
+ devlink_trap_stats_idle_test $trap_name
+ check_err $? "Trap stats not idle with initial drop action"
+ devlink_trap_group_stats_idle_test $group_name
+ check_err $? "Trap group stats not idle with initial drop action"
+
+ devlink_trap_action_set $trap_name "trap"
+ devlink_trap_stats_idle_test $trap_name
+ check_fail $? "Trap stats idle after setting action to trap"
+ devlink_trap_group_stats_idle_test $group_name
+ check_fail $? "Trap group stats idle after setting action to trap"
+
+ devlink_trap_action_set $trap_name "drop"
+
+ devlink_trap_stats_idle_test $trap_name
+ check_err $? "Trap stats not idle after setting action to drop"
+ devlink_trap_group_stats_idle_test $group_name
+ check_err $? "Trap group stats not idle after setting action to drop"
+
+ tc_check_packets "dev $dev egress" 101 0
+ check_err $? "Packets were not dropped"
+}
+
+devlink_trap_drop_cleanup()
+{
+ local mz_pid=$1; shift
+ local dev=$1; shift
+ local proto=$1; shift
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ tc filter del dev $dev egress protocol $proto pref 1 handle 101 flower
+}
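
devlink_trap_drop_test() assumes its caller installed an egress flower filter with handle 101 on $dev and started a mausezahn stream, and devlink_trap_drop_cleanup() undoes both. A hedged caller sketch; the trap name, addresses and interface variables are placeholders:

    tc filter add dev $rp2 egress protocol ip pref 1 handle 101 \
        flower skip_hw dst_ip 198.51.100.1 action drop
    $MZ $h1 -t udp "sp=54321,dp=12345" -c 0 -d 1msec -b $rp1mac \
        -B 198.51.100.1 -q &
    mz_pid=$!

    devlink_trap_drop_test blackhole_route l3_drops $rp2
    devlink_trap_drop_cleanup $mz_pid $rp2 ip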
diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh
new file mode 100755
index 000000000000..eb8e2a23bbb4
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ethtool.sh
@@ -0,0 +1,318 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+ same_speeds_autoneg_off
+ different_speeds_autoneg_off
+ combination_of_neg_on_and_off
+ advertise_subset_of_speeds
+ check_highest_speed_is_chosen
+ different_speeds_autoneg_on
+"
+NUM_NETIFS=2
+source lib.sh
+source ethtool_lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 192.0.2.1/24
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.2/24
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 192.0.2.2/24
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ h2=${NETIFS[p2]}
+
+ h1_create
+ h2_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ h2_destroy
+ h1_destroy
+}
+
+different_speeds_get()
+{
+ local dev1=$1; shift
+ local dev2=$1; shift
+ local with_mode=$1; shift
+ local adver=$1; shift
+
+ local -a speeds_arr
+
+ speeds_arr=($(common_speeds_get $dev1 $dev2 $with_mode $adver))
+ if [[ ${#speeds_arr[@]} -lt 2 ]]; then
+ check_err 1 "cannot check different speeds: fewer than two common speeds"
+ fi
+
+ echo ${speeds_arr[0]} ${speeds_arr[1]}
+}
+
+same_speeds_autoneg_off()
+{
+ # Check that when each of the reported speeds is forced, the links come
+ # up and are operational.
+ local -a speeds_arr=($(common_speeds_get $h1 $h2 0 0))
+
+ for speed in "${speeds_arr[@]}"; do
+ RET=0
+ ethtool_set $h1 speed $speed autoneg off
+ ethtool_set $h2 speed $speed autoneg off
+
+ setup_wait_dev_with_timeout $h1
+ setup_wait_dev_with_timeout $h2
+ ping_do $h1 192.0.2.2
+ check_err $? "speed $speed autoneg off"
+ log_test "force of same speed autoneg off"
+ log_info "speed = $speed"
+ done
+
+ ethtool -s $h2 autoneg on
+ ethtool -s $h1 autoneg on
+}
+
+different_speeds_autoneg_off()
+{
+ # Test that when we force different speeds, links are not up and ping
+ # fails.
+ RET=0
+
+ local -a speeds_arr=($(different_speeds_get $h1 $h2 0 0))
+ local speed1=${speeds_arr[0]}
+ local speed2=${speeds_arr[1]}
+
+ ethtool_set $h1 speed $speed1 autoneg off
+ ethtool_set $h2 speed $speed2 autoneg off
+
+ setup_wait_dev_with_timeout $h1
+ setup_wait_dev_with_timeout $h2
+ ping_do $h1 192.0.2.2
+ check_fail $? "ping with different speeds"
+
+ log_test "force of different speeds autoneg off"
+
+ ethtool -s $h2 autoneg on
+ ethtool -s $h1 autoneg on
+}
+
+combination_of_neg_on_and_off()
+{
+ # Test that when one device is forced to a speed supported by both
+ # endpoints and the other device is configured to autoneg on, the links
+ # are up and ping passes.
+ local -a speeds_arr=($(common_speeds_get $h1 $h2 0 1))
+
+ for speed in "${speeds_arr[@]}"; do
+ RET=0
+ ethtool_set $h1 speed $speed autoneg off
+
+ setup_wait_dev_with_timeout $h1
+ setup_wait_dev_with_timeout $h2
+ ping_do $h1 192.0.2.2
+ check_err $? "h1-speed=$speed autoneg off, h2 autoneg on"
+ log_test "one side with autoneg off and another with autoneg on"
+ log_info "force speed = $speed"
+ done
+
+ ethtool -s $h1 autoneg on
+}
+
+hex_speed_value_get()
+{
+ local speed=$1; shift
+
+ local shift_size=${speed_values[$speed]}
+ speed=$((0x1 << shift_size))
+ printf "%#x" "$speed"
+}
+
+subset_of_common_speeds_get()
+{
+ local dev1=$1; shift
+ local dev2=$1; shift
+ local adver=$1; shift
+
+ local -a speeds_arr=($(common_speeds_get $dev1 $dev2 0 $adver))
+ local speed_to_advertise=0
+ local speed_to_remove=${speeds_arr[0]}
+ speed_to_remove+='base'
+
+ local -a speeds_mode_arr=($(common_speeds_get $dev1 $dev2 1 $adver))
+
+ for speed in ${speeds_mode_arr[@]}; do
+ if [[ $speed != $speed_to_remove* ]]; then
+ speed=$(hex_speed_value_get $speed)
+ speed_to_advertise=$(($speed_to_advertise | \
+ $speed))
+ fi
+
+ done
+
+ # Convert to hex.
+ printf "%#x" "$speed_to_advertise"
+}
+
+speed_to_advertise_get()
+{
+ # The function returns the hex number that is composed by OR-ing all
+ # the modes corresponding to the provided speed.
+ local speed_without_mode=$1; shift
+ local supported_speeds=("$@"); shift
+ local speed_to_advertise=0
+
+ speed_without_mode+='base'
+
+ for speed in ${supported_speeds[@]}; do
+ if [[ $speed == $speed_without_mode* ]]; then
+ speed=$(hex_speed_value_get $speed)
+ speed_to_advertise=$(($speed_to_advertise | \
+ $speed))
+ fi
+
+ done
+
+ # Convert to hex.
+ printf "%#x" "$speed_to_advertise"
+}
+
+advertise_subset_of_speeds()
+{
+ # Test that when one device advertises a subset of speeds and another
+ # advertises a specific speed (but all modes of this speed), the links
+ # are up and ping passes.
+ RET=0
+
+ local speed_1_to_advertise=$(subset_of_common_speeds_get $h1 $h2 1)
+ ethtool_set $h1 advertise $speed_1_to_advertise
+
+ if [ $RET != 0 ]; then
+ log_test "advertise subset of speeds"
+ return
+ fi
+
+ local -a speeds_arr_without_mode=($(common_speeds_get $h1 $h2 0 1))
+ # Check only speeds that h1 advertised. Remove the first speed.
+ unset speeds_arr_without_mode[0]
+ local -a speeds_arr_with_mode=($(common_speeds_get $h1 $h2 1 1))
+
+ for speed_value in ${speeds_arr_without_mode[@]}; do
+ RET=0
+ local speed_2_to_advertise=$(speed_to_advertise_get $speed_value \
+ "${speeds_arr_with_mode[@]}")
+ ethtool_set $h2 advertise $speed_2_to_advertise
+
+ setup_wait_dev_with_timeout $h1
+ setup_wait_dev_with_timeout $h2
+ ping_do $h1 192.0.2.2
+ check_err $? "h1=$speed_1_to_advertise, h2=$speed_2_to_advertise ($speed_value)"
+
+ log_test "advertise subset of speeds"
+ log_info "h1=$speed_1_to_advertise, h2=$speed_2_to_advertise"
+ done
+
+ ethtool -s $h2 autoneg on
+ ethtool -s $h1 autoneg on
+}
+
+check_highest_speed_is_chosen()
+{
+ # Test that when one device advertises a subset of speeds, the other
+ # chooses the highest speed. This test checks configuration without
+ # traffic.
+ RET=0
+
+ local max_speed
+ local chosen_speed
+ local speed_to_advertise=$(subset_of_common_speeds_get $h1 $h2 1)
+
+ ethtool_set $h1 advertise $speed_to_advertise
+
+ if [ $RET != 0 ]; then
+ log_test "check highest speed"
+ return
+ fi
+
+ local -a speeds_arr=($(common_speeds_get $h1 $h2 0 1))
+ # Remove the first speed, h1 does not advertise this speed.
+ unset speeds_arr[0]
+
+ max_speed=${speeds_arr[0]}
+ for current in ${speeds_arr[@]}; do
+ if [[ $current -gt $max_speed ]]; then
+ max_speed=$current
+ fi
+ done
+
+ setup_wait_dev_with_timeout $h1
+ setup_wait_dev_with_timeout $h2
+ chosen_speed=$(ethtool $h1 | grep 'Speed:')
+ chosen_speed=${chosen_speed%"Mb/s"*}
+ chosen_speed=${chosen_speed#*"Speed: "}
+ ((chosen_speed == max_speed))
+ check_err $? "h1 advertise $speed_to_advertise, h2 sync to speed $chosen_speed"
+
+ log_test "check highest speed"
+
+ ethtool -s $h2 autoneg on
+ ethtool -s $h1 autoneg on
+}
+
+different_speeds_autoneg_on()
+{
+ # Test that when we configure links to advertise different speeds,
+ # links are not up and ping fails.
+ RET=0
+
+ local -a speeds=($(different_speeds_get $h1 $h2 1 1))
+ local speed1=${speeds[0]}
+ local speed2=${speeds[1]}
+
+ speed1=$(hex_speed_value_get $speed1)
+ speed2=$(hex_speed_value_get $speed2)
+
+ ethtool_set $h1 advertise $speed1
+ ethtool_set $h2 advertise $speed2
+
+ if [[ $RET -eq 0 ]]; then
+ setup_wait_dev_with_timeout $h1
+ setup_wait_dev_with_timeout $h2
+ ping_do $h1 192.0.2.2
+ check_fail $? "ping with different speeds autoneg on"
+ fi
+
+ log_test "advertise different speeds autoneg on"
+
+ ethtool -s $h2 autoneg on
+ ethtool -s $h1 autoneg on
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+declare -gA speed_values
+eval "speed_values=($(speeds_arr_get))"
+
+tests_run
+
+exit $EXIT_STATUS
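
speed_values is an associative array mapping each link-mode string to its ETHTOOL_LINK_MODE_*_BIT number, scraped from the uapi header by speeds_arr_get(). With the upstream header, 1000baseT/Full sits at bit 5, so hex_speed_value_get() turns it into the advertise mask bit 0x20:

    declare -A speed_values=( [1000baseT/Full]=5 )
    shift_size=${speed_values[1000baseT/Full]}
    printf "%#x\n" $((0x1 << shift_size))   # -> 0x20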
diff --git a/tools/testing/selftests/net/forwarding/ethtool_lib.sh b/tools/testing/selftests/net/forwarding/ethtool_lib.sh
new file mode 100755
index 000000000000..925d229a59d8
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ethtool_lib.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+speeds_arr_get()
+{
+ cmd='/ETHTOOL_LINK_MODE_[^[:space:]]*_BIT[[:space:]]+=[[:space:]]+/ \
+ {sub(/,$/, "") \
+ sub(/ETHTOOL_LINK_MODE_/,"") \
+ sub(/_BIT/,"") \
+ sub(/_Full/,"/Full") \
+ sub(/_Half/,"/Half");\
+ print "["$1"]="$3}'
+
+ awk "${cmd}" /usr/include/linux/ethtool.h
+}
+
+ethtool_set()
+{
+ local cmd="$@"
+ local out=$(ethtool -s $cmd 2>&1 | wc -l)
+
+ check_err $out "Error in configuration: $cmd"
+}
+
+dev_speeds_get()
+{
+ local dev=$1; shift
+ local with_mode=$1; shift
+ local adver=$1; shift
+ local speeds_str
+
+ if (($adver)); then
+ mode="Advertised link modes"
+ else
+ mode="Supported link modes"
+ fi
+
+ speeds_str=$(ethtool "$dev" | \
+ # Snip everything before the link modes section.
+ sed -n '/'"$mode"':/,$p' | \
+ # Quit processing the rest at the start of the next section.
+ # When checking, skip the header of this section (hence the 2,).
+ sed -n '2,${/^[\t][^ \t]/q};p' | \
+ # Drop the section header of the current section.
+ cut -d':' -f2)
+
+ local -a speeds_arr=($speeds_str)
+ if [[ $with_mode -eq 0 ]]; then
+ for ((i=0; i<${#speeds_arr[@]}; i++)); do
+ speeds_arr[$i]=${speeds_arr[$i]%base*}
+ done
+ fi
+ echo ${speeds_arr[@]}
+}
+
+common_speeds_get()
+{
+ dev1=$1; shift
+ dev2=$1; shift
+ with_mode=$1; shift
+ adver=$1; shift
+
+ local -a dev1_speeds=($(dev_speeds_get $dev1 $with_mode $adver))
+ local -a dev2_speeds=($(dev_speeds_get $dev2 $with_mode $adver))
+
+ comm -12 \
+ <(printf '%s\n' "${dev1_speeds[@]}" | sort -u) \
+ <(printf '%s\n' "${dev2_speeds[@]}" | sort -u)
+}
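
A quick interactive check of the helpers above (device names and outputs are illustrative): list the speeds two links share, with and without the link-mode suffix:

    source ethtool_lib.sh
    common_speeds_get eth0 eth1 1 0   # e.g. 1000baseT/Full 10000baseT/Full
    common_speeds_get eth0 eth1 0 0   # e.g. 1000 10000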
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 8b48ec54d058..1f64e7348f69 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -18,6 +18,8 @@ NETIF_CREATE=${NETIF_CREATE:=yes}
MCD=${MCD:=smcrouted}
MC_CLI=${MC_CLI:=smcroutectl}
PING_TIMEOUT=${PING_TIMEOUT:=5}
+WAIT_TIMEOUT=${WAIT_TIMEOUT:=20}
+INTERFACE_TIMEOUT=${INTERFACE_TIMEOUT:=600}
relative_path="${BASH_SOURCE%/*}"
if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
@@ -226,24 +228,45 @@ log_info()
setup_wait_dev()
{
local dev=$1; shift
+ local wait_time=${1:-$WAIT_TIME}; shift
- while true; do
+ setup_wait_dev_with_timeout "$dev" $INTERFACE_TIMEOUT $wait_time
+
+ if (($?)); then
+ check_err 1
+ log_test setup_wait_dev ": Interface $dev does not come up."
+ exit 1
+ fi
+}
+
+setup_wait_dev_with_timeout()
+{
+ local dev=$1; shift
+ local max_iterations=${1:-$WAIT_TIMEOUT}; shift
+ local wait_time=${1:-$WAIT_TIME}; shift
+ local i
+
+ for ((i = 1; i <= $max_iterations; ++i)); do
ip link show dev $dev up \
| grep 'state UP' &> /dev/null
if [[ $? -ne 0 ]]; then
sleep 1
else
- break
+ sleep $wait_time
+ return 0
fi
done
+
+ return 1
}
setup_wait()
{
local num_netifs=${1:-$NUM_NETIFS}
+ local i
for ((i = 1; i <= num_netifs; ++i)); do
- setup_wait_dev ${NETIFS[p$i]}
+ setup_wait_dev ${NETIFS[p$i]} 0
done
# Make sure links are ready.
diff --git a/tools/testing/selftests/net/forwarding/tc_common.sh b/tools/testing/selftests/net/forwarding/tc_common.sh
index 315e934358d4..d93589bd4d1d 100644
--- a/tools/testing/selftests/net/forwarding/tc_common.sh
+++ b/tools/testing/selftests/net/forwarding/tc_common.sh
@@ -14,3 +14,14 @@ tc_check_packets()
select(.options.actions[0].stats.packets == $count)" \
&> /dev/null
}
+
+tc_check_packets_hitting()
+{
+ local id=$1
+ local handle=$2
+
+ cmd_jq "tc -j -s filter show $id" \
+ ".[] | select(.options.handle == $handle) | \
+ select(.options.actions[0].stats.packets > 0)" \
+ &> /dev/null
+}
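
Unlike tc_check_packets(), which matches an exact packet count, the new helper only requires the filter to have been hit at least once. The reject-route test earlier in this series uses it exactly like this:

    tc_check_packets_hitting "dev $h1 ingress" 101
    check_err $? "ICMP packet was not received on h1"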
diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c
index 31ced79f4f25..5bb370a0857e 100644
--- a/tools/testing/selftests/net/tcp_mmap.c
+++ b/tools/testing/selftests/net/tcp_mmap.c
@@ -82,7 +82,9 @@ static int zflg; /* zero copy option. (MSG_ZEROCOPY for sender, mmap() for recei
static int xflg; /* hash received data (simple xor) (-h option) */
static int keepflag; /* -k option: receiver shall keep all received file in memory (no munmap() calls) */
-static int chunk_size = 512*1024;
+static size_t chunk_size = 512*1024;
+
+static size_t map_align;
unsigned long htotal;
@@ -118,6 +120,9 @@ void hash_zone(void *zone, unsigned int length)
htotal = temp;
}
+#define ALIGN_UP(x, align_to) (((x) + ((align_to)-1)) & ~((align_to)-1))
+#define ALIGN_PTR_UP(p, ptr_align_to) ((typeof(p))ALIGN_UP((unsigned long)(p), ptr_align_to))
+
void *child_thread(void *arg)
{
unsigned long total_mmap = 0, total = 0;
@@ -126,6 +131,7 @@ void *child_thread(void *arg)
int flags = MAP_SHARED;
struct timeval t0, t1;
char *buffer = NULL;
+ void *raddr = NULL;
void *addr = NULL;
double throughput;
struct rusage ru;
@@ -142,9 +148,13 @@ void *child_thread(void *arg)
goto error;
}
if (zflg) {
- addr = mmap(NULL, chunk_size, PROT_READ, flags, fd, 0);
- if (addr == (void *)-1)
+ raddr = mmap(NULL, chunk_size + map_align, PROT_READ, flags, fd, 0);
+ if (raddr == (void *)-1) {
+ perror("mmap");
zflg = 0;
+ } else {
+ addr = ALIGN_PTR_UP(raddr, map_align);
+ }
}
while (1) {
struct pollfd pfd = { .fd = fd, .events = POLLIN, };
@@ -222,7 +232,7 @@ error:
free(buffer);
close(fd);
if (zflg)
- munmap(addr, chunk_size);
+ munmap(raddr, chunk_size + map_align);
pthread_exit(0);
}
@@ -270,6 +280,11 @@ static void setup_sockaddr(int domain, const char *str_addr,
static void do_accept(int fdlisten)
{
+ pthread_attr_t attr;
+
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+
if (setsockopt(fdlisten, SOL_SOCKET, SO_RCVLOWAT,
&chunk_size, sizeof(chunk_size)) == -1) {
perror("setsockopt SO_RCVLOWAT");
@@ -288,7 +303,7 @@ static void do_accept(int fdlisten)
perror("accept");
continue;
}
- res = pthread_create(&th, NULL, child_thread,
+ res = pthread_create(&th, &attr, child_thread,
(void *)(unsigned long)fd);
if (res) {
errno = res;
@@ -298,6 +313,30 @@ static void do_accept(int fdlisten)
}
}
+/* Each thread should reserve a large enough vma to avoid
+ * spinlock collisions on the page table (ptl) locks.
+ * The huge page size is 2MB on x86_64, and is exported in /proc/meminfo.
+ */
+static unsigned long default_huge_page_size(void)
+{
+ FILE *f = fopen("/proc/meminfo", "r");
+ unsigned long hps = 0;
+ size_t linelen = 0;
+ char *line = NULL;
+
+ if (!f)
+ return 0;
+ while (getline(&line, &linelen, f) > 0) {
+ if (sscanf(line, "Hugepagesize: %lu kB", &hps) == 1) {
+ hps <<= 10;
+ break;
+ }
+ }
+ free(line);
+ fclose(f);
+ return hps;
+}
+
int main(int argc, char *argv[])
{
struct sockaddr_storage listenaddr, addr;
@@ -309,7 +348,7 @@ int main(int argc, char *argv[])
int sflg = 0;
int mss = 0;
- while ((c = getopt(argc, argv, "46p:svr:w:H:zxkP:M:")) != -1) {
+ while ((c = getopt(argc, argv, "46p:svr:w:H:zxkP:M:C:a:")) != -1) {
switch (c) {
case '4':
cfg_family = PF_INET;
@@ -349,10 +388,24 @@ int main(int argc, char *argv[])
case 'P':
max_pacing_rate = atoi(optarg) ;
break;
+ case 'C':
+ chunk_size = atol(optarg);
+ break;
+ case 'a':
+ map_align = atol(optarg);
+ break;
default:
exit(1);
}
}
+ if (!map_align) {
+ map_align = default_huge_page_size();
+ /* If /proc/meminfo does not help,
+ * fall back to the default x86_64 huge page size.
+ */
+ if (!map_align)
+ map_align = 2*1024*1024;
+ }
if (sflg) {
int fdlisten = socket(cfg_family, SOCK_STREAM, 0);
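
ALIGN_UP()/ALIGN_PTR_UP() round an address up to the next map_align boundary so that each thread's receive mapping starts on its own huge page. The same arithmetic in shell, for the default 2MB alignment (the sample address is arbitrary):

    align=$((2 * 1024 * 1024))
    addr=0x7f3a12345678
    printf "%#x\n" $(( (addr + align - 1) & ~(align - 1) ))   # -> 0x7f3a12400000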
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 4c285b6e1db8..1c8f194d6556 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -898,6 +898,114 @@ TEST_F(tls, nonblocking)
}
}
+static void
+test_multiproc(struct __test_metadata *_metadata, struct _test_data_tls *self,
+ bool sendpg, unsigned int n_readers, unsigned int n_writers)
+{
+ const unsigned int n_children = n_readers + n_writers;
+ const size_t data = 6 * 1000 * 1000;
+ const size_t file_sz = data / 100;
+ size_t read_bias, write_bias;
+ int i, fd, child_id;
+ char buf[file_sz];
+ pid_t pid;
+
+ /* Only allow multiples for simplicity */
+ ASSERT_EQ(!(n_readers % n_writers) || !(n_writers % n_readers), true);
+ read_bias = n_writers / n_readers ?: 1;
+ write_bias = n_readers / n_writers ?: 1;
+
+ /* prep a file to send */
+ fd = open("/tmp/", O_TMPFILE | O_RDWR, 0600);
+ ASSERT_GE(fd, 0);
+
+ memset(buf, 0xac, file_sz);
+ ASSERT_EQ(write(fd, buf, file_sz), file_sz);
+
+ /* spawn children */
+ for (child_id = 0; child_id < n_children; child_id++) {
+ pid = fork();
+ ASSERT_NE(pid, -1);
+ if (!pid)
+ break;
+ }
+
+ /* parent waits for all children */
+ if (pid) {
+ for (i = 0; i < n_children; i++) {
+ int status;
+
+ wait(&status);
+ EXPECT_EQ(status, 0);
+ }
+
+ return;
+ }
+
+ /* Split threads for reading and writing */
+ if (child_id < n_readers) {
+ size_t left = data * read_bias;
+ char rb[8001];
+
+ while (left) {
+ int res;
+
+ res = recv(self->cfd, rb,
+ left > sizeof(rb) ? sizeof(rb) : left, 0);
+
+ EXPECT_GE(res, 0);
+ left -= res;
+ }
+ } else {
+ size_t left = data * write_bias;
+
+ while (left) {
+ int res;
+
+ ASSERT_EQ(lseek(fd, 0, SEEK_SET), 0);
+ if (sendpg)
+ res = sendfile(self->fd, fd, NULL,
+ left > file_sz ? file_sz : left);
+ else
+ res = send(self->fd, buf,
+ left > file_sz ? file_sz : left, 0);
+
+ EXPECT_GE(res, 0);
+ left -= res;
+ }
+ }
+}
+
+TEST_F(tls, multiproc_even)
+{
+ test_multiproc(_metadata, self, false, 6, 6);
+}
+
+TEST_F(tls, multiproc_readers)
+{
+ test_multiproc(_metadata, self, false, 4, 12);
+}
+
+TEST_F(tls, multiproc_writers)
+{
+ test_multiproc(_metadata, self, false, 10, 2);
+}
+
+TEST_F(tls, multiproc_sendpage_even)
+{
+ test_multiproc(_metadata, self, true, 6, 6);
+}
+
+TEST_F(tls, multiproc_sendpage_readers)
+{
+ test_multiproc(_metadata, self, true, 4, 12);
+}
+
+TEST_F(tls, multiproc_sendpage_writers)
+{
+ test_multiproc(_metadata, self, true, 10, 2);
+}
+
TEST_F(tls, control_msg)
{
if (self->notls)
diff --git a/tools/testing/selftests/net/traceroute.sh b/tools/testing/selftests/net/traceroute.sh
new file mode 100755
index 000000000000..de9ca97abc30
--- /dev/null
+++ b/tools/testing/selftests/net/traceroute.sh
@@ -0,0 +1,322 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Run traceroute/traceroute6 tests
+#
+
+VERBOSE=0
+PAUSE_ON_FAIL=no
+
+################################################################################
+#
+log_test()
+{
+ local rc=$1
+ local expected=$2
+ local msg="$3"
+
+ if [ ${rc} -eq ${expected} ]; then
+ printf "TEST: %-60s [ OK ]\n" "${msg}"
+ nsuccess=$((nsuccess+1))
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+}
+
+run_cmd()
+{
+ local ns
+ local cmd
+ local out
+ local rc
+
+ ns="$1"
+ shift
+ cmd="$*"
+
+ if [ "$VERBOSE" = "1" ]; then
+ printf " COMMAND: $cmd\n"
+ fi
+
+ out=$(eval ip netns exec ${ns} ${cmd} 2>&1)
+ rc=$?
+ if [ "$VERBOSE" = "1" -a -n "$out" ]; then
+ echo " $out"
+ fi
+
+ [ "$VERBOSE" = "1" ] && echo
+
+ return $rc
+}
+
+################################################################################
+# create namespaces and interconnects
+
+create_ns()
+{
+ local ns=$1
+ local addr=$2
+ local addr6=$3
+
+ [ -z "${addr}" ] && addr="-"
+ [ -z "${addr6}" ] && addr6="-"
+
+ ip netns add ${ns}
+
+ ip netns exec ${ns} ip link set lo up
+ if [ "${addr}" != "-" ]; then
+ ip netns exec ${ns} ip addr add dev lo ${addr}
+ fi
+ if [ "${addr6}" != "-" ]; then
+ ip netns exec ${ns} ip -6 addr add dev lo ${addr6}
+ fi
+
+ ip netns exec ${ns} ip ro add unreachable default metric 8192
+ ip netns exec ${ns} ip -6 ro add unreachable default metric 8192
+
+ ip netns exec ${ns} sysctl -qw net.ipv4.ip_forward=1
+ ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1
+ ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.forwarding=1
+ ip netns exec ${ns} sysctl -qw net.ipv6.conf.default.forwarding=1
+ ip netns exec ${ns} sysctl -qw net.ipv6.conf.default.accept_dad=0
+}
+
+# create veth pair to connect namespaces and apply addresses.
+connect_ns()
+{
+ local ns1=$1
+ local ns1_dev=$2
+ local ns1_addr=$3
+ local ns1_addr6=$4
+ local ns2=$5
+ local ns2_dev=$6
+ local ns2_addr=$7
+ local ns2_addr6=$8
+
+ ip netns exec ${ns1} ip li add ${ns1_dev} type veth peer name tmp
+ ip netns exec ${ns1} ip li set ${ns1_dev} up
+ ip netns exec ${ns1} ip li set tmp netns ${ns2} name ${ns2_dev}
+ ip netns exec ${ns2} ip li set ${ns2_dev} up
+
+ if [ "${ns1_addr}" != "-" ]; then
+ ip netns exec ${ns1} ip addr add dev ${ns1_dev} ${ns1_addr}
+ fi
+
+ if [ "${ns2_addr}" != "-" ]; then
+ ip netns exec ${ns2} ip addr add dev ${ns2_dev} ${ns2_addr}
+ fi
+
+ if [ "${ns1_addr6}" != "-" ]; then
+ ip netns exec ${ns1} ip addr add dev ${ns1_dev} ${ns1_addr6}
+ fi
+
+ if [ "${ns2_addr6}" != "-" ]; then
+ ip netns exec ${ns2} ip addr add dev ${ns2_dev} ${ns2_addr6}
+ fi
+}
+
+################################################################################
+# traceroute6 test
+#
+# Verify that in this scenario
+#
+# ------------------------ N2
+# | |
+# ------ ------ N3 ----
+# | R1 | | R2 |------|H2|
+# ------ ------ ----
+# | |
+# ------------------------ N1
+# |
+# ----
+# |H1|
+# ----
+#
+# where H1's default route goes through R1 and R1's default route goes
+# through R2 over N2, traceroute6 from H1 to H2 reports R2's address
+# on N2 and not N1.
+#
+# Addresses are assigned as follows:
+#
+# N1: 2000:101::/64
+# N2: 2000:102::/64
+# N3: 2000:103::/64
+#
+# R1's host part of address: 1
+# R2's host part of address: 2
+# H1's host part of address: 3
+# H2's host part of address: 4
+#
+# For example:
+# the IPv6 address of R1's interface on N2 is 2000:102::1/64
+
+cleanup_traceroute6()
+{
+ local ns
+
+ for ns in host-1 host-2 router-1 router-2
+ do
+ ip netns del ${ns} 2>/dev/null
+ done
+}
+
+setup_traceroute6()
+{
+ brdev=br0
+
+ # start clean
+ cleanup_traceroute6
+
+ set -e
+ create_ns host-1
+ create_ns host-2
+ create_ns router-1
+ create_ns router-2
+
+ # Setup N3
+ connect_ns router-2 eth3 - 2000:103::2/64 host-2 eth3 - 2000:103::4/64
+ ip netns exec host-2 ip route add default via 2000:103::2
+
+ # Setup N2
+ connect_ns router-1 eth2 - 2000:102::1/64 router-2 eth2 - 2000:102::2/64
+ ip netns exec router-1 ip route add default via 2000:102::2
+
+ # Setup N1. host-1 and router-2 connect to a bridge in router-1.
+ ip netns exec router-1 ip link add name ${brdev} type bridge
+ ip netns exec router-1 ip link set ${brdev} up
+ ip netns exec router-1 ip addr add 2000:101::1/64 dev ${brdev}
+
+ connect_ns host-1 eth0 - 2000:101::3/64 router-1 eth0 - -
+ ip netns exec router-1 ip link set dev eth0 master ${brdev}
+ ip netns exec host-1 ip route add default via 2000:101::1
+
+ connect_ns router-2 eth1 - 2000:101::2/64 router-1 eth1 - -
+ ip netns exec router-1 ip link set dev eth1 master ${brdev}
+
+ # Prime the network
+ ip netns exec host-1 ping6 -c5 2000:103::4 >/dev/null 2>&1
+
+ set +e
+}
+
+run_traceroute6()
+{
+ if [ ! -x "$(command -v traceroute6)" ]; then
+ echo "SKIP: Could not run IPV6 test without traceroute6"
+ return
+ fi
+
+ setup_traceroute6
+
+ # traceroute6 host-2 from host-1 (expects 2000:102::2)
+ run_cmd host-1 "traceroute6 2000:103::4 | grep -q 2000:102::2"
+ log_test $? 0 "IPV6 traceroute"
+
+ cleanup_traceroute6
+}
+
+################################################################################
+# traceroute test
+#
+# Verify that traceroute from H1 to H2 shows 1.0.1.1 in this scenario
+#
+# 1.0.3.1/24
+# ---- 1.0.1.3/24 1.0.1.1/24 ---- 1.0.2.1/24 1.0.2.4/24 ----
+# |H1|--------------------------|R1|--------------------------|H2|
+# ---- N1 ---- N2 ----
+#
+# where net.ipv4.icmp_errors_use_inbound_ifaddr is set on R1 and
+# 1.0.3.1/24 and 1.0.1.1/24 are respectively R1's primary and secondary
+# address on N1.
+#
+
+cleanup_traceroute()
+{
+ local ns
+
+ for ns in host-1 host-2 router
+ do
+ ip netns del ${ns} 2>/dev/null
+ done
+}
+
+setup_traceroute()
+{
+ # start clean
+ cleanup_traceroute
+
+ set -e
+ create_ns host-1
+ create_ns host-2
+ create_ns router
+
+ connect_ns host-1 eth0 1.0.1.3/24 - \
+ router eth1 1.0.3.1/24 -
+ ip netns exec host-1 ip route add default via 1.0.1.1
+
+ ip netns exec router ip addr add 1.0.1.1/24 dev eth1
+ ip netns exec router sysctl -qw \
+ net.ipv4.icmp_errors_use_inbound_ifaddr=1
+
+ connect_ns host-2 eth0 1.0.2.4/24 - \
+ router eth2 1.0.2.1/24 -
+ ip netns exec host-2 ip route add default via 1.0.2.1
+
+ # Prime the network
+ ip netns exec host-1 ping -c5 1.0.2.4 >/dev/null 2>&1
+
+ set +e
+}
+
+run_traceroute()
+{
+ if [ ! -x "$(command -v traceroute)" ]; then
+ echo "SKIP: Could not run IPV4 test without traceroute"
+ return
+ fi
+
+ setup_traceroute
+
+ # traceroute host-2 from host-1 (expects 1.0.1.1). Takes a while.
+ run_cmd host-1 "traceroute 1.0.2.4 | grep -q 1.0.1.1"
+ log_test $? 0 "IPV4 traceroute"
+
+ cleanup_traceroute
+}
+
+################################################################################
+# Run tests
+
+run_tests()
+{
+ run_traceroute6
+ run_traceroute
+}
+
+################################################################################
+# main
+
+declare -i nfail=0
+declare -i nsuccess=0
+
+while getopts :pv o
+do
+ case $o in
+ p) PAUSE_ON_FAIL=yes;;
+ v) VERBOSE=$(($VERBOSE + 1));;
+ *) exit 1;;
+ esac
+done
+
+run_tests
+
+printf "\nTests passed: %3d\n" ${nsuccess}
+printf "Tests failed: %3d\n" ${nfail}
diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
index bd4a7247b44f..c0dd10257df5 100644
--- a/tools/testing/selftests/ptp/testptp.c
+++ b/tools/testing/selftests/ptp/testptp.c
@@ -44,6 +44,46 @@ static int clock_adjtime(clockid_t id, struct timex *tx)
}
#endif
+static void show_flag_test(int rq_index, unsigned int flags, int err)
+{
+ printf("PTP_EXTTS_REQUEST%c flags 0x%08x : (%d) %s\n",
+ rq_index ? '1' + rq_index : ' ',
+ flags, err, strerror(errno));
+ /* sigh, uClibc ... */
+ errno = 0;
+}
+
+static void do_flag_test(int fd, unsigned int index)
+{
+ struct ptp_extts_request extts_request;
+ unsigned long request[2] = {
+ PTP_EXTTS_REQUEST,
+ PTP_EXTTS_REQUEST2,
+ };
+ unsigned int enable_flags[5] = {
+ PTP_ENABLE_FEATURE,
+ PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
+ PTP_ENABLE_FEATURE | PTP_FALLING_EDGE,
+ PTP_ENABLE_FEATURE | PTP_RISING_EDGE | PTP_FALLING_EDGE,
+ PTP_ENABLE_FEATURE | (PTP_EXTTS_VALID_FLAGS + 1),
+ };
+ int err, i, j;
+
+ memset(&extts_request, 0, sizeof(extts_request));
+ extts_request.index = index;
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 5; j++) {
+ extts_request.flags = enable_flags[j];
+ err = ioctl(fd, request[i], &extts_request);
+ show_flag_test(i, extts_request.flags, err);
+
+ extts_request.flags = 0;
+ err = ioctl(fd, request[i], &extts_request);
+ }
+ }
+}
+
static clockid_t get_clockid(int fd)
{
#define CLOCKFD 3
@@ -96,7 +136,8 @@ static void usage(char *progname)
" -s set the ptp clock time from the system time\n"
" -S set the system time from the ptp clock time\n"
" -t val shift the ptp clock time by 'val' seconds\n"
- " -T val set the ptp clock time to 'val' seconds\n",
+ " -T val set the ptp clock time to 'val' seconds\n"
+ " -z test combinations of rising/falling external time stamp flags\n",
progname);
}
@@ -122,6 +163,7 @@ int main(int argc, char *argv[])
int adjtime = 0;
int capabilities = 0;
int extts = 0;
+ int flagtest = 0;
int gettime = 0;
int index = 0;
int list_pins = 0;
@@ -138,7 +180,7 @@ int main(int argc, char *argv[])
progname = strrchr(argv[0], '/');
progname = progname ? 1+progname : argv[0];
- while (EOF != (c = getopt(argc, argv, "cd:e:f:ghi:k:lL:p:P:sSt:T:v"))) {
+ while (EOF != (c = getopt(argc, argv, "cd:e:f:ghi:k:lL:p:P:sSt:T:z"))) {
switch (c) {
case 'c':
capabilities = 1;
@@ -191,6 +233,9 @@ int main(int argc, char *argv[])
settime = 3;
seconds = atoi(optarg);
break;
+ case 'z':
+ flagtest = 1;
+ break;
case 'h':
usage(progname);
return 0;
@@ -322,6 +367,10 @@ int main(int argc, char *argv[])
}
}
+ if (flagtest) {
+ do_flag_test(fd, index);
+ }
+
if (list_pins) {
int n_pins = 0;
if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
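
The new -z mode walks both PTP_EXTTS_REQUEST ioctls through the valid flag combinations and one deliberately invalid one, printing each result; pick the clock with -d as usual (the device path is an example):

    ./testptp -d /dev/ptp0 -z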
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json
index e31a080edc49..866f0efd0859 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json
@@ -168,6 +168,54 @@
]
},
{
+ "id": "09d2",
+ "name": "Add mpls dec_ttl action with opcode and cookie",
+ "category": [
+ "actions",
+ "mpls"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mpls",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mpls dec_ttl pipe index 8 cookie aabbccddeeff",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mpls",
+ "matchPattern": "action order [0-9]+: mpls.*dec_ttl pipe.*index 8 ref.*cookie aabbccddeeff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mpls"
+ ]
+ },
+ {
+ "id": "c170",
+ "name": "Add mpls dec_ttl action with opcode and cookie of max length",
+ "category": [
+ "actions",
+ "mpls"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mpls",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mpls dec_ttl continue index 8 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mpls",
+ "matchPattern": "action order [0-9]+: mpls.*dec_ttl continue.*index 8 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mpls"
+ ]
+ },
+ {
"id": "9118",
"name": "Add mpls dec_ttl action with invalid opcode",
"category": [
@@ -302,6 +350,30 @@
]
},
{
+ "id": "91fb",
+ "name": "Add mpls pop action with ip proto and cookie",
+ "category": [
+ "actions",
+ "mpls"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mpls",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mpls pop protocol ipv4 cookie 12345678",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mpls",
+ "matchPattern": "action order [0-9]+: mpls.*pop.*protocol.*ip.*pipe.*ref 1.*cookie 12345678",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mpls"
+ ]
+ },
+ {
"id": "92fe",
"name": "Add mpls pop action with mpls proto",
"category": [
@@ -508,6 +580,30 @@
]
},
{
+ "id": "7c34",
+ "name": "Add mpls push action with label, tc ttl and cookie of max length",
+ "category": [
+ "actions",
+ "mpls"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mpls",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mpls push label 20 tc 3 ttl 128 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mpls",
+ "matchPattern": "action order [0-9]+: mpls.*push.*protocol.*mpls_uc.*label.*20.*tc.*3.*ttl.*128.*pipe.*ref 1.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mpls"
+ ]
+ },
+ {
"id": "16eb",
"name": "Add mpls push action with label and bos",
"category": [
@@ -828,6 +924,30 @@
]
},
{
+ "id": "77c1",
+ "name": "Add mpls mod action with mpls ttl and cookie",
+ "category": [
+ "actions",
+ "mpls"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mpls",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mpls mod ttl 128 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mpls",
+ "matchPattern": "action order [0-9]+: mpls.*modify.*ttl.*128.*pipe.*ref 1.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mpls"
+ ]
+ },
+ {
"id": "b80f",
"name": "Add mpls mod action with mpls max ttl",
"category": [
@@ -1037,6 +1157,31 @@
]
},
{
+ "id": "95a9",
+ "name": "Replace existing mpls push action with new label, tc, ttl and cookie",
+ "category": [
+ "actions",
+ "mpls"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mpls",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action mpls push label 20 tc 3 ttl 128 index 1 cookie aa11bb22cc33dd44ee55ff66aa11b1b2"
+ ],
+ "cmdUnderTest": "$TC actions replace action mpls push label 30 tc 2 ttl 125 pipe index 1 cookie aa11bb22cc33",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action mpls index 1",
+ "matchPattern": "action order [0-9]+: mpls.*push.*protocol.*mpls_uc.*label.*30 tc 2 ttl 125 pipe.*index 1.*cookie aa11bb22cc33",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mpls"
+ ]
+ },
+ {
"id": "6cce",
"name": "Delete mpls pop action",
"category": [
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
index 6035956a1a73..f8ea6f5fa8e9 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
@@ -349,6 +349,256 @@
]
},
{
+ "id": "1762",
+ "name": "Add pedit action with RAW_OP offset u8 clear value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u8 clear",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 0: val 00000000 mask 00ffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "bcee",
+ "name": "Add pedit action with RAW_OP offset u8 retain value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u8 set 0x11 retain 0x0f",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 0: val 01000000 mask f0ffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "e89f",
+ "name": "Add pedit action with RAW_OP offset u16 retain value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u16 set 0x1122 retain 0xff00",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 0: val 11000000 mask 00ffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "c282",
+ "name": "Add pedit action with RAW_OP offset u32 clear value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u32 clear",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 0: val 00000000 mask 00000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "c422",
+ "name": "Add pedit action with RAW_OP offset u16 invert value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 12 u16 invert",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 12: val ffff0000 mask ffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "d3d3",
+ "name": "Add pedit action with RAW_OP offset u32 invert value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 12 u32 invert",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 12: val ffffffff mask ffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "57e5",
+ "name": "Add pedit action with RAW_OP offset u8 preserve value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u8 preserve",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 0: val 00000000 mask ffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "99e0",
+ "name": "Add pedit action with RAW_OP offset u16 preserve value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u16 preserve",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 0: val 00000000 mask ffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "1892",
+ "name": "Add pedit action with RAW_OP offset u32 preserve value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u32 preserve",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 0: val 00000000 mask ffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "4b60",
+ "name": "Add pedit action with RAW_OP negative offset u16/u32 set value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset -14 u16 set 0x0000 munge offset -12 u32 set 0x00000100 munge offset -8 u32 set 0x0aaf0100 munge offset -4 u32 set 0x0008eb06 pipe",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 4.*key #0.*at -16: val 00000000 mask ffff0000.*key #1.*at -12: val 00000100 mask 00000000.*key #2.*at -8: val 0aaf0100 mask 00000000.*key #3.*at -4: val 0008eb06 mask 00000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
"id": "a5a7",
"name": "Add pedit action with LAYERED_OP eth set src",
"category": [
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json b/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
new file mode 100644
index 000000000000..76ae03a64506
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
@@ -0,0 +1,325 @@
+[
+ {
+ "id": "7a92",
+ "name": "Add basic filter with cmp ematch u8/link layer and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer link mask 0xff gt 10)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*cmp\\(u8 at 0 layer 0 mask 0xff gt 10\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "2e8a",
+ "name": "Add basic filter with cmp ematch u8/link layer with trans flag and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer link mask 0xff trans gt 10)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*cmp\\(u8 at 0 layer 0 mask 0xff trans gt 10\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "4d9f",
+ "name": "Add basic filter with cmp ematch u16/link layer and a single action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u16 at 0 layer 0 mask 0xff00 lt 3)' action pass",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u16 at 0 layer 0 mask 0xff00 lt 3\\).*action.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "4943",
+ "name": "Add basic filter with cmp ematch u32/link layer and miltiple actions",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u32 at 4 layer link mask 0xff00ff00 eq 3)' action skbedit mark 7 pipe action gact drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u32 at 4 layer 0 mask 0xff00ff00 eq 3\\).*action.*skbedit.*mark 7 pipe.*action.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "7559",
+ "name": "Add basic filter with cmp ematch u8/network layer and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0xab protocol ip prio 11 basic match 'cmp(u8 at 0 layer 1 mask 0xff gt 10)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xab prio 11 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 11 basic.*handle 0xab flowid 1:1.*cmp\\(u8 at 0 layer 1 mask 0xff gt 10\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "aff4",
+ "name": "Add basic filter with cmp ematch u8/network layer with trans flag and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0xab protocol ip prio 11 basic match 'cmp(u8 at 0 layer 1 mask 0xff trans gt 10)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xab prio 11 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 11 basic.*handle 0xab flowid 1:1.*cmp\\(u8 at 0 layer 1 mask 0xff trans gt 10\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "c732",
+ "name": "Add basic filter with cmp ematch u16/network layer and a single action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0x100 protocol ip prio 100 basic match 'cmp(u16 at 0 layer network mask 0xff00 lt 3)' action pass",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0x100 prio 100 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 100 basic.*handle 0x100.*cmp\\(u16 at 0 layer 1 mask 0xff00 lt 3\\).*action.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "32d8",
+ "name": "Add basic filter with cmp ematch u32/network layer and miltiple actions",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0x112233 protocol ip prio 7 basic match 'cmp(u32 at 4 layer network mask 0xff00ff00 eq 3)' action skbedit mark 7 pipe action gact drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0x112233 prio 7 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 7 basic.*handle 0x112233.*cmp\\(u32 at 4 layer 1 mask 0xff00ff00 eq 3\\).*action.*skbedit.*mark 7 pipe.*action.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "6f5e",
+ "name": "Add basic filter with cmp ematch u8/transport layer and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer transport mask 0xff gt 10)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*cmp\\(u8 at 0 layer 2 mask 0xff gt 10\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "0752",
+ "name": "Add basic filter with cmp ematch u8/transport layer with trans flag and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer transport mask 0xff trans gt 10)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*cmp\\(u8 at 0 layer 2 mask 0xff trans gt 10\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "7e07",
+ "name": "Add basic filter with cmp ematch u16/transport layer and a single action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u16 at 0 layer 2 mask 0xff00 lt 3)' action pass",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u16 at 0 layer 2 mask 0xff00 lt 3\\).*action.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "62d7",
+ "name": "Add basic filter with cmp ematch u32/transport layer and miltiple actions",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u32 at 4 layer transport mask 0xff00ff00 eq 3)' action skbedit mark 7 pipe action gact drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u32 at 4 layer 2 mask 0xff00ff00 eq 3\\).*action.*skbedit.*mark 7 pipe.*action.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "304b",
+ "name": "Add basic filter with NOT cmp ematch rule and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'not cmp(u8 at 0 layer link mask 0xff eq 3)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*NOT cmp\\(u8 at 0 layer 0 mask 0xff eq 3\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "8ecb",
+ "name": "Add basic filter with two ANDed cmp ematch rules and single action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer link mask 0xff eq 3) and cmp(u16 at 8 layer link mask 0x00ff gt 7)' action gact drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u8 at 0 layer 0 mask 0xff eq 3\\).*AND cmp\\(u16 at 8 layer 0 mask 0xff gt 7\\).*action.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "b1ad",
+ "name": "Add basic filter with two ORed cmp ematch rules and single action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer link mask 0xff eq 3) or cmp(u16 at 8 layer link mask 0x00ff gt 7)' action gact drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u8 at 0 layer 0 mask 0xff eq 3\\).*OR cmp\\(u16 at 8 layer 0 mask 0xff gt 7\\).*action.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "4600",
+ "name": "Add basic filter with two ANDed cmp ematch rules and one ORed ematch rule and single action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer link mask 0xff eq 3) and cmp(u16 at 8 layer link mask 0x00ff gt 7) or cmp(u32 at 4 layer network mask 0xa0a0 lt 3)' action gact drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u8 at 0 layer 0 mask 0xff eq 3\\).*AND cmp\\(u16 at 8 layer 0 mask 0xff gt 7\\).*OR cmp\\(u32 at 4 layer 1 mask 0xa0a0 lt 3\\).*action.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "bc59",
+ "name": "Add basic filter with two ANDed cmp ematch rules and one NOT ORed ematch rule and single action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer link mask 0xff eq 3) and cmp(u16 at 8 layer link mask 0x00ff gt 7) or not cmp(u32 at 4 layer network mask 0xa0a0 lt 3)' action gact drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u8 at 0 layer 0 mask 0xff eq 3\\).*AND cmp\\(u16 at 8 layer 0 mask 0xff gt 7\\).*OR NOT cmp\\(u32 at 4 layer 1 mask 0xa0a0 lt 3\\).*action.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ }
+]
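
A note on the layer keywords exercised by these tests: the cmp() ematch accepts symbolic layer names on the command line, but the kernel dumps them numerically, which is why each matchPattern above expects "layer 0", "layer 1", or "layer 2" where the command said link, network, or transport. The mapping follows the TCF_LAYER_* enum, sketched here from include/uapi/linux/pkt_cls.h (comments are mine):

	enum {
		TCF_LAYER_LINK,		/* 0: "layer link", raw packet data    */
		TCF_LAYER_NETWORK,	/* 1: "layer network", e.g. IP header  */
		TCF_LAYER_TRANSPORT,	/* 2: "layer transport", e.g. TCP/UDP  */
		__TCF_LAYER_MAX		/* must be last */
	};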
diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c
index cb3fc09645c4..485cf06ef013 100644
--- a/tools/testing/selftests/vm/gup_benchmark.c
+++ b/tools/testing/selftests/vm/gup_benchmark.c
@@ -71,7 +71,7 @@ int main(int argc, char **argv)
flags |= MAP_SHARED;
break;
case 'H':
- flags |= MAP_HUGETLB;
+ flags |= (MAP_HUGETLB | MAP_ANONYMOUS);
break;
default:
return -1;
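
For context, a minimal standalone sketch (my example, not part of the patch) of why the extra flag matters: mmap() rejects MAP_HUGETLB with EINVAL when given a regular, non-hugetlbfs file descriptor, while MAP_ANONYMOUS | MAP_HUGETLB ignores the fd and requests anonymous huge pages, assuming hugepages have been reserved (e.g. via /proc/sys/vm/nr_hugepages):

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* 2 MiB anonymous huge-page mapping; the fd is ignored and
		 * passed as -1 by convention for anonymous mappings. */
		void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");	/* e.g. no hugepages reserved */
			return 1;
		}
		munmap(p, 2UL << 20);
		return 0;
	}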
diff --git a/tools/usb/usbip/libsrc/usbip_device_driver.c b/tools/usb/usbip/libsrc/usbip_device_driver.c
index 051d7d3f443b..927a151fa9aa 100644
--- a/tools/usb/usbip/libsrc/usbip_device_driver.c
+++ b/tools/usb/usbip/libsrc/usbip_device_driver.c
@@ -69,7 +69,7 @@ int read_usb_vudc_device(struct udev_device *sdev, struct usbip_usb_device *dev)
FILE *fd = NULL;
struct udev_device *plat;
const char *speed;
- int ret = 0;
+ size_t ret;
plat = udev_device_get_parent(sdev);
path = udev_device_get_syspath(plat);
@@ -79,8 +79,10 @@ int read_usb_vudc_device(struct udev_device *sdev, struct usbip_usb_device *dev)
if (!fd)
return -1;
ret = fread((char *) &descr, sizeof(descr), 1, fd);
- if (ret < 0)
+ if (ret != 1) {
+ err("Cannot read vudc device descr file: %s", strerror(errno));
goto err;
+ }
fclose(fd);
copy_descr_attr(dev, &descr, bDeviceClass);
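
The underlying pitfall, as a standalone sketch (mine, not from the patch): fread() returns a size_t element count and reports failure by returning fewer items than requested, never a negative value, so the old "ret < 0" test could never fire; success has to be checked against the requested count:

	#include <stdio.h>

	/* Read exactly one size-byte record. Returns 0 on success, -1 on a
	 * short read or error (ferror()/feof() can tell the two apart). */
	static int read_record(FILE *fp, void *buf, size_t size)
	{
		size_t n = fread(buf, size, 1, fp);

		return n == 1 ? 0 : -1;
	}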
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d6f0696d98ef..13efc291b1c7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -50,6 +50,7 @@
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
+#include <linux/kthread.h>
#include <asm/processor.h>
#include <asm/ioctl.h>
@@ -121,9 +122,22 @@ static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#define KVM_COMPAT(c) .compat_ioctl = (c)
#else
+/*
+ * For architectures that don't implement a compat infrastructure,
+ * adopt a double line of defense:
+ * - Prevent a compat task from opening /dev/kvm
+ * - If the open has been done by a 64-bit task, and the KVM fd is
+ * then passed to a compat task, let the ioctls fail.
+ */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg) { return -EINVAL; }
-#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl
+
+static int kvm_no_compat_open(struct inode *inode, struct file *file)
+{
+ return is_compat_task() ? -ENODEV : 0;
+}
+#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
+ .open = kvm_no_compat_open
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);
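
The observable effect of the new open() hook, sketched from userspace (illustrative, not part of the patch): on a kernel built without KVM compat ioctl support, a 32-bit compat task now fails at open() with ENODEV instead of obtaining a file descriptor whose ioctls would only fail later:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/kvm", O_RDWR);

		if (fd < 0) {
			/* A compat task on a no-compat build sees ENODEV. */
			printf("open(/dev/kvm): %s\n", strerror(errno));
			return 1;
		}
		close(fd);
		return 0;
	}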
@@ -149,10 +163,30 @@ __weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
return 0;
}
+bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
+{
+ /*
+ * The metadata used by is_zone_device_page() to determine whether or
+ * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
+ * the device has been pinned, e.g. by get_user_pages(). WARN if the
+ * page_count() is zero to help detect bad usage of this helper.
+ */
+ if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
+ return false;
+
+ return is_zone_device_page(pfn_to_page(pfn));
+}
+
bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
+ /*
+ * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
+ * perspective they are "normal" pages, albeit with slightly different
+ * usage rules.
+ */
if (pfn_valid(pfn))
- return PageReserved(pfn_to_page(pfn));
+ return PageReserved(pfn_to_page(pfn)) &&
+ !kvm_is_zone_device_pfn(pfn);
return true;
}
@@ -625,6 +659,23 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
return 0;
}
+/*
+ * Called after the VM is otherwise initialized, but just before adding it to
+ * the vm_list.
+ */
+int __weak kvm_arch_post_init_vm(struct kvm *kvm)
+{
+ return 0;
+}
+
+/*
+ * Called just after removing the VM from the vm_list, but before doing any
+ * other destruction.
+ */
+void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
+{
+}
+
static struct kvm *kvm_create_vm(unsigned long type)
{
struct kvm *kvm = kvm_arch_alloc_vm();
@@ -645,6 +696,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
+ if (init_srcu_struct(&kvm->srcu))
+ goto out_err_no_srcu;
+ if (init_srcu_struct(&kvm->irq_srcu))
+ goto out_err_no_irq_srcu;
+
+ refcount_set(&kvm->users_count, 1);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
struct kvm_memslots *slots = kvm_alloc_memslots();
@@ -662,7 +719,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
goto out_err_no_arch_destroy_vm;
}
- refcount_set(&kvm->users_count, 1);
r = kvm_arch_init_vm(kvm, type);
if (r)
goto out_err_no_arch_destroy_vm;
@@ -675,13 +731,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif
- if (init_srcu_struct(&kvm->srcu))
- goto out_err_no_srcu;
- if (init_srcu_struct(&kvm->irq_srcu))
- goto out_err_no_irq_srcu;
-
r = kvm_init_mmu_notifier(kvm);
if (r)
+ goto out_err_no_mmu_notifier;
+
+ r = kvm_arch_post_init_vm(kvm);
+ if (r)
goto out_err;
mutex_lock(&kvm_lock);
@@ -693,19 +748,24 @@ static struct kvm *kvm_create_vm(unsigned long type)
return kvm;
out_err:
- cleanup_srcu_struct(&kvm->irq_srcu);
-out_err_no_irq_srcu:
- cleanup_srcu_struct(&kvm->srcu);
-out_err_no_srcu:
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ if (kvm->mmu_notifier.ops)
+ mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
+#endif
+out_err_no_mmu_notifier:
hardware_disable_all();
out_err_no_disable:
kvm_arch_destroy_vm(kvm);
- WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
out_err_no_arch_destroy_vm:
+ WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
for (i = 0; i < KVM_NR_BUSES; i++)
kfree(kvm_get_bus(kvm, i));
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
+ cleanup_srcu_struct(&kvm->irq_srcu);
+out_err_no_irq_srcu:
+ cleanup_srcu_struct(&kvm->srcu);
+out_err_no_srcu:
kvm_arch_free_vm(kvm);
mmdrop(current->mm);
return ERR_PTR(r);
@@ -737,6 +797,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
mutex_lock(&kvm_lock);
list_del(&kvm->vm_list);
mutex_unlock(&kvm_lock);
+ kvm_arch_pre_destroy_vm(kvm);
+
kvm_free_irq_routing(kvm);
for (i = 0; i < KVM_NR_BUSES; i++) {
struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
@@ -1857,7 +1919,7 @@ EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
void kvm_set_pfn_dirty(kvm_pfn_t pfn)
{
- if (!kvm_is_reserved_pfn(pfn)) {
+ if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) {
struct page *page = pfn_to_page(pfn);
SetPageDirty(page);
@@ -1867,7 +1929,7 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
void kvm_set_pfn_accessed(kvm_pfn_t pfn)
{
- if (!kvm_is_reserved_pfn(pfn))
+ if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
@@ -4371,3 +4433,86 @@ void kvm_exit(void)
kvm_vfio_ops_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);
+
+struct kvm_vm_worker_thread_context {
+ struct kvm *kvm;
+ struct task_struct *parent;
+ struct completion init_done;
+ kvm_vm_thread_fn_t thread_fn;
+ uintptr_t data;
+ int err;
+};
+
+static int kvm_vm_worker_thread(void *context)
+{
+ /*
+ * The init_context is allocated on the stack of the parent thread, so
+ * we have to locally copy anything that is needed beyond initialization.
+ */
+ struct kvm_vm_worker_thread_context *init_context = context;
+ struct kvm *kvm = init_context->kvm;
+ kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
+ uintptr_t data = init_context->data;
+ int err;
+
+ err = kthread_park(current);
+ /* kthread_park(current) is never supposed to return an error */
+ WARN_ON(err != 0);
+ if (err)
+ goto init_complete;
+
+ err = cgroup_attach_task_all(init_context->parent, current);
+ if (err) {
+ kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
+ __func__, err);
+ goto init_complete;
+ }
+
+ set_user_nice(current, task_nice(init_context->parent));
+
+init_complete:
+ init_context->err = err;
+ complete(&init_context->init_done);
+ init_context = NULL;
+
+ if (err)
+ return err;
+
+ /* Wait to be woken up by the spawner before proceeding. */
+ kthread_parkme();
+
+ if (!kthread_should_stop())
+ err = thread_fn(kvm, data);
+
+ return err;
+}
+
+int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
+ uintptr_t data, const char *name,
+ struct task_struct **thread_ptr)
+{
+ struct kvm_vm_worker_thread_context init_context = {};
+ struct task_struct *thread;
+
+ *thread_ptr = NULL;
+ init_context.kvm = kvm;
+ init_context.parent = current;
+ init_context.thread_fn = thread_fn;
+ init_context.data = data;
+ init_completion(&init_context.init_done);
+
+ thread = kthread_run(kvm_vm_worker_thread, &init_context,
+ "%s-%d", name, task_pid_nr(current));
+ if (IS_ERR(thread))
+ return PTR_ERR(thread);
+
+ /* kthread_run is never supposed to return NULL */
+ WARN_ON(thread == NULL);
+
+ wait_for_completion(&init_context.init_done);
+
+ if (!init_context.err)
+ *thread_ptr = thread;
+
+ return init_context.err;
+}
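
A hedged usage sketch of the new helper (names here are illustrative; within this series the intended caller appears to be the x86 NX huge-page recovery thread): kvm_vm_worker_thread() parks itself after initialization, so the spawner unparks the returned thread once setup is complete, and the matching kvm_arch_pre_destroy_vm() hook is a natural place to stop it:

	/* Illustrative worker body: runs after kthread_unpark() and loops
	 * until kthread_stop() is called on the thread. */
	static int example_worker_fn(struct kvm *kvm, uintptr_t data)
	{
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}

	static struct task_struct *example_thread;	/* would live in arch state */

	int kvm_arch_post_init_vm(struct kvm *kvm)
	{
		int err;

		err = kvm_vm_create_worker_thread(kvm, example_worker_fn, 0,
						  "kvm-example", &example_thread);
		if (!err)
			kthread_unpark(example_thread);	/* thread starts parked */
		return err;
	}

	void kvm_arch_pre_destroy_vm(struct kvm *kvm)
	{
		if (example_thread)
			kthread_stop(example_thread);
	}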